diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp @@ -540,6 +540,7 @@ MachineBasicBlock &MBB) const { const RISCVRegisterInfo *RI = STI.getRegisterInfo(); MachineFrameInfo &MFI = MF.getFrameInfo(); + const RISCVInstrInfo *TII = STI.getInstrInfo(); auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>(); Register FPReg = getFPReg(STI); Register SPReg = getSPReg(STI); @@ -606,6 +607,50 @@ adjustReg(MBB, LastFrameDestroy, DL, SPReg, SPReg, SecondSPAdjustAmount, MachineInstr::FrameDestroy); + + // Emit ".cfi_def_cfa_offset FirstSPAdjustAmount" if using an sp-based CFA + if (!hasFP(MF)) { + unsigned CFIIndex = MF.addFrameInst( + MCCFIInstruction::cfiDefCfaOffset(nullptr, -FirstSPAdjustAmount)); + BuildMI(MBB, LastFrameDestroy, DL, + TII->get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + } + } + + if (hasFP(MF)) { + // Find the instruction that restores FP from the stack. + for (auto &I = LastFrameDestroy; I != MBBI; ++I) { + if (I->mayLoad() && I->getOperand(0).isReg()) { + Register DestReg = I->getOperand(0).getReg(); + if (DestReg == FPReg) { + // If there is a frame pointer, after restoring $fp we need to + // adjust the CFA back to the correct sp-based offset. + // Emit ".cfi_def_cfa $sp, CFAOffset" + uint64_t CFAOffset = + FirstSPAdjustAmount + ? FirstSPAdjustAmount + RVFI->getVarArgsSaveSize() + : FPOffset; + unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa( + nullptr, RI->getDwarfRegNum(SPReg, true), CFAOffset)); + BuildMI(MBB, std::next(I), DL, + TII->get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + break; + } + } + } + } + + // Add CFI directives for callee-saved registers. + // Iterate over the list of callee-saved registers and emit .cfi_restore + // directives. + for (const auto &Entry : CSI) { + Register Reg = Entry.getReg(); + unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore( + nullptr, RI->getDwarfRegNum(Reg, true))); + BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); } if (FirstSPAdjustAmount) @@ -616,6 +661,13 @@ // Emit epilogue for shadow call stack.
emitSCSEpilogue(MF, MBB, MBBI, DL); + + // After restoring $sp, we need to adjust CFA to $(sp + 0) + // Emit ".cfi_def_cfa_offset 0" + unsigned CFIIndex = + MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, 0)); + BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); } StackOffset diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/calllowering-ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/calllowering-ret.ll --- a/llvm/test/CodeGen/RISCV/GlobalISel/calllowering-ret.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/calllowering-ret.ll @@ -7,10 +7,12 @@ define void @foo() { ; RV32I-LABEL: foo: ; RV32I: # %bb.0: # %entry +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: foo: ; RV64I: # %bb.0: # %entry +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll --- a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll +++ b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll @@ -13,6 +13,7 @@ ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: addi a0, a0, 1073 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_a1: @@ -20,6 +21,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 1073 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 37 %tmp1 = mul i32 %tmp0, 29 @@ -32,6 +34,7 @@ ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: addi a0, a0, 1073 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_a2: @@ -39,6 +42,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 1073 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 37 %tmp1 = mul i32 %tmp0, 29 @@ -56,6 +60,7 @@ ; RV32IMB-NEXT: addi a0, a2, 1073 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_a3: @@ -63,6 +68,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mul a0, a0, a1 ; RV64IMB-NEXT: addi a0, a0, 1073 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i64 %x, 37 %tmp1 = mul i64 %tmp0, 29 @@ -77,6 +83,7 @@ ; RV32IMB-NEXT: lui a1, 50 ; RV32IMB-NEXT: addi a1, a1, 1119 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_b1: @@ -86,6 +93,7 @@ ; RV64IMB-NEXT: lui a1, 50 ; RV64IMB-NEXT: addiw a1, a1, 1119 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 8953 %tmp1 = mul i32 %tmp0, 23 @@ -100,6 +108,7 @@ ; RV32IMB-NEXT: lui a1, 50 ; RV32IMB-NEXT: addi a1, a1, 1119 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_b2: @@ -109,6 +118,7 @@ ; RV64IMB-NEXT: lui a1, 50 ; RV64IMB-NEXT: addiw a1, a1, 1119 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 8953 %tmp1 = mul i32 %tmp0, 23 @@ -128,6 +138,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_accept_b3: @@ -137,6 +148,7 @@ ; RV64IMB-NEXT: lui a1, 50 ; RV64IMB-NEXT: addiw a1, a1, 1119 ; RV64IMB-NEXT: 
add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i64 %x, 8953 %tmp1 = mul i64 %tmp0, 23 @@ -149,6 +161,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1971 ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_a1: @@ -156,6 +169,7 @@ ; RV64IMB-NEXT: addiw a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1971 %tmp1 = mul i32 %tmp0, 29 @@ -168,6 +182,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1971 ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_a2: @@ -175,6 +190,7 @@ ; RV64IMB-NEXT: addiw a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1971 %tmp1 = mul i32 %tmp0, 29 @@ -194,6 +210,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_a3: @@ -201,6 +218,7 @@ ; RV64IMB-NEXT: addi a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mul a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i64 %x, 1971 %tmp1 = mul i64 %tmp0, 29 @@ -213,6 +231,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1000 ; RV32IMB-NEXT: sh3add a1, a0, a0 ; RV32IMB-NEXT: sh3add a0, a1, a0 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_c1: @@ -221,6 +240,7 @@ ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 ; RV64IMB-NEXT: sext.w a0, a0 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1000 %tmp1 = mul i32 %tmp0, 73 @@ -233,6 +253,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1000 ; RV32IMB-NEXT: sh3add a1, a0, a0 ; RV32IMB-NEXT: sh3add a0, a1, a0 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_c2: @@ -241,6 +262,7 @@ ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 ; RV64IMB-NEXT: sext.w a0, a0 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1000 %tmp1 = mul i32 %tmp0, 73 @@ -260,6 +282,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_c3: @@ -267,6 +290,7 @@ ; RV64IMB-NEXT: addi a0, a0, 1000 ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i64 %x, 1000 %tmp1 = mul i64 %tmp0, 73 @@ -279,6 +303,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1000 ; RV32IMB-NEXT: sh1add a0, a0, a0 ; RV32IMB-NEXT: slli a0, a0, 6 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_d1: @@ -286,6 +311,7 @@ ; RV64IMB-NEXT: addi a0, a0, 1000 ; RV64IMB-NEXT: sh1add a0, a0, a0 ; RV64IMB-NEXT: slliw a0, a0, 6 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1000 %tmp1 = mul i32 %tmp0, 192 @@ -298,6 +324,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1000 ; RV32IMB-NEXT: sh1add a0, a0, a0 ; RV32IMB-NEXT: slli a0, a0, 6 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_d2: @@ -305,6 +332,7 @@ ; 
RV64IMB-NEXT: addi a0, a0, 1000 ; RV64IMB-NEXT: sh1add a0, a0, a0 ; RV64IMB-NEXT: slliw a0, a0, 6 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i32 %x, 1000 %tmp1 = mul i32 %tmp0, 192 @@ -326,6 +354,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_d3: @@ -333,6 +362,7 @@ ; RV64IMB-NEXT: addi a0, a0, 1000 ; RV64IMB-NEXT: sh1add a0, a0, a0 ; RV64IMB-NEXT: slli a0, a0, 6 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = add i64 %x, 1000 %tmp1 = mul i64 %tmp0, 192 @@ -345,6 +375,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1971 ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_e1: @@ -352,6 +383,7 @@ ; RV64IMB-NEXT: addiw a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 %tmp1 = add i32 %tmp0, 57159 @@ -364,6 +396,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1971 ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_e2: @@ -371,6 +404,7 @@ ; RV64IMB-NEXT: addiw a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 %tmp1 = add i32 %tmp0, 57159 @@ -390,6 +424,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_e3: @@ -397,6 +432,7 @@ ; RV64IMB-NEXT: addi a0, a0, 1971 ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mul a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 29 %tmp1 = add i64 %tmp0, 57159 @@ -410,6 +446,7 @@ ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: addi a0, a0, 11 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_f1: @@ -418,6 +455,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 11 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 %tmp1 = add i32 %tmp0, 57199 @@ -431,6 +469,7 @@ ; RV32IMB-NEXT: addi a1, zero, 29 ; RV32IMB-NEXT: mul a0, a0, a1 ; RV32IMB-NEXT: addi a0, a0, 11 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_f2: @@ -439,6 +478,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mulw a0, a0, a1 ; RV64IMB-NEXT: addiw a0, a0, 11 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 29 %tmp1 = add i32 %tmp0, 57199 @@ -458,6 +498,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_f3: @@ -466,6 +507,7 @@ ; RV64IMB-NEXT: addi a1, zero, 29 ; RV64IMB-NEXT: mul a0, a0, a1 ; RV64IMB-NEXT: addi a0, a0, 11 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 29 %tmp1 = add i64 %tmp0, 57199 @@ -479,6 +521,7 @@ ; RV32IMB-NEXT: sh3add a1, a0, a0 ; RV32IMB-NEXT: sh3add a0, a1, a0 ; RV32IMB-NEXT: addi a0, a0, 10 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; 
RV64IMB-LABEL: add_mul_combine_reject_g1: @@ -487,6 +530,7 @@ ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 ; RV64IMB-NEXT: addiw a0, a0, 10 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 73 %tmp1 = add i32 %tmp0, 7310 @@ -500,6 +544,7 @@ ; RV32IMB-NEXT: sh3add a1, a0, a0 ; RV32IMB-NEXT: sh3add a0, a1, a0 ; RV32IMB-NEXT: addi a0, a0, 10 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_g2: @@ -508,6 +553,7 @@ ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 ; RV64IMB-NEXT: addiw a0, a0, 10 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 73 %tmp1 = add i32 %tmp0, 7310 @@ -527,6 +573,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_reject_g3: @@ -535,6 +582,7 @@ ; RV64IMB-NEXT: sh3add a1, a0, a0 ; RV64IMB-NEXT: sh3add a0, a1, a0 ; RV64IMB-NEXT: addi a0, a0, 10 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 73 %tmp1 = add i64 %tmp0, 7310 @@ -555,6 +603,7 @@ ; RV32IMB-NEXT: addi a0, a0, 1024 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: add_mul_combine_infinite_loop: @@ -563,6 +612,7 @@ ; RV64IMB-NEXT: lui a1, 1 ; RV64IMB-NEXT: addiw a1, a1, -2048 ; RV64IMB-NEXT: sh3add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 24 %tmp1 = add i64 %tmp0, 2048 @@ -578,6 +628,7 @@ ; RV32IMB-NEXT: lui a1, 2 ; RV32IMB-NEXT: addi a1, a1, 798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mul3000_add8990_a: @@ -588,6 +639,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 3000 %tmp1 = add i32 %tmp0, 8990 @@ -603,6 +655,7 @@ ; RV32IMB-NEXT: lui a1, 2 ; RV32IMB-NEXT: addi a1, a1, 798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mul3000_add8990_b: @@ -613,6 +666,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 3000 %tmp1 = add i32 %tmp0, 8990 @@ -633,6 +687,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mul3000_add8990_c: @@ -643,6 +698,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 3000 %tmp1 = add i64 %tmp0, 8990 @@ -658,6 +714,7 @@ ; RV32IMB-NEXT: lui a1, 1048574 ; RV32IMB-NEXT: addi a1, a1, -798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mul3000_sub8990_a: @@ -668,6 +725,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 3000 %tmp1 = add i32 %tmp0, -8990 @@ -683,6 +741,7 @@ ; RV32IMB-NEXT: lui a1, 1048574 ; RV32IMB-NEXT: addi a1, a1, -798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; 
RV64IMB-LABEL: mul3000_sub8990_b: @@ -693,6 +752,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, 3000 %tmp1 = add i32 %tmp0, -8990 @@ -714,6 +774,7 @@ ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 ; RV32IMB-NEXT: addi a1, a1, -1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mul3000_sub8990_c: @@ -724,6 +785,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, 3000 %tmp1 = add i64 %tmp0, -8990 @@ -739,6 +801,7 @@ ; RV32IMB-NEXT: lui a1, 2 ; RV32IMB-NEXT: addi a1, a1, 798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_add8990_a: @@ -749,6 +812,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, -3000 %tmp1 = add i32 %tmp0, 8990 @@ -764,6 +828,7 @@ ; RV32IMB-NEXT: lui a1, 2 ; RV32IMB-NEXT: addi a1, a1, 798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_add8990_b: @@ -774,6 +839,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, -3000 %tmp1 = add i32 %tmp0, 8990 @@ -795,6 +861,7 @@ ; RV32IMB-NEXT: add a0, a2, a0 ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_add8990_c: @@ -805,6 +872,7 @@ ; RV64IMB-NEXT: lui a1, 2 ; RV64IMB-NEXT: addiw a1, a1, 798 ; RV64IMB-NEXT: add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, -3000 %tmp1 = add i64 %tmp0, 8990 @@ -820,6 +888,7 @@ ; RV32IMB-NEXT: lui a1, 1048574 ; RV32IMB-NEXT: addi a1, a1, -798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_sub8990_a: @@ -830,6 +899,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, -3000 %tmp1 = add i32 %tmp0, -8990 @@ -845,6 +915,7 @@ ; RV32IMB-NEXT: lui a1, 1048574 ; RV32IMB-NEXT: addi a1, a1, -798 ; RV32IMB-NEXT: add a0, a0, a1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_sub8990_b: @@ -855,6 +926,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: addw a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i32 %x, -3000 %tmp1 = add i32 %tmp0, -8990 @@ -877,6 +949,7 @@ ; RV32IMB-NEXT: sltu a2, a0, a2 ; RV32IMB-NEXT: add a1, a1, a2 ; RV32IMB-NEXT: addi a1, a1, -1 +; RV32IMB-NEXT: .cfi_def_cfa_offset 0 ; RV32IMB-NEXT: ret ; ; RV64IMB-LABEL: mulneg3000_sub8990_c: @@ -887,6 +960,7 @@ ; RV64IMB-NEXT: lui a1, 1048574 ; RV64IMB-NEXT: addiw a1, a1, -798 ; RV64IMB-NEXT: add a0, a0, a1 +; RV64IMB-NEXT: .cfi_def_cfa_offset 0 ; RV64IMB-NEXT: ret %tmp0 = mul i64 %x, -3000 %tmp1 = add i64 %tmp0, -8990 diff --git a/llvm/test/CodeGen/RISCV/addrspacecast.ll b/llvm/test/CodeGen/RISCV/addrspacecast.ll --- a/llvm/test/CodeGen/RISCV/addrspacecast.ll +++ b/llvm/test/CodeGen/RISCV/addrspacecast.ll 
@@ -8,11 +8,13 @@ ; RV32I-LABEL: cast0: ; RV32I: # %bb.0: ; RV32I-NEXT: sw zero, 0(a0) +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: cast0: ; RV64I: # %bb.0: ; RV64I-NEXT: sw zero, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %ptr0 = addrspacecast i32 addrspace(1)* %ptr to i32 addrspace(0)* store i32 0, i32* %ptr0 @@ -28,7 +30,9 @@ ; RV32I-NEXT: .cfi_offset ra, -4 ; RV32I-NEXT: call foo@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: cast1: @@ -39,7 +43,9 @@ ; RV64I-NEXT: .cfi_offset ra, -8 ; RV64I-NEXT: call foo@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %castptr = addrspacecast i32* %ptr to i32 addrspace(10)* call void @foo(i32 addrspace(10)* %castptr) diff --git a/llvm/test/CodeGen/RISCV/aext-to-sext.ll b/llvm/test/CodeGen/RISCV/aext-to-sext.ll --- a/llvm/test/CodeGen/RISCV/aext-to-sext.ll +++ b/llvm/test/CodeGen/RISCV/aext-to-sext.ll @@ -62,6 +62,7 @@ ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64I-NEXT: beq a0, a1, .LBB1_1 ; RV64I-NEXT: # %bb.2: # %bar +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret br label %bb diff --git a/llvm/test/CodeGen/RISCV/alu32.ll b/llvm/test/CodeGen/RISCV/alu32.ll --- a/llvm/test/CodeGen/RISCV/alu32.ll +++ b/llvm/test/CodeGen/RISCV/alu32.ll @@ -133,12 +133,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 3 ; RV32I-NEXT: ori a0, a0, 1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: srli_demandedbits: ; RV64I: # %bb.0: ; RV64I-NEXT: srliw a0, a0, 3 ; RV64I-NEXT: ori a0, a0, 1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %2 = lshr i32 %0, 3 %3 = or i32 %2, 1 diff --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll --- a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll @@ -1045,6 +1045,7 @@ ; RV32I-NEXT: srli a1, a0, 1 ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test_parity_i32: @@ -1062,6 +1063,7 @@ ; RV64I-NEXT: srli a1, a0, 1 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = call i32 @llvm.ctpop.i32(i32 %a) %2 = and i32 %1, 1 @@ -1084,6 +1086,7 @@ ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test_parity_i64: @@ -1101,6 +1104,7 @@ ; RV64I-NEXT: srli a1, a0, 1 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = call i64 @llvm.ctpop.i64(i64 %a) %2 = and i64 %1, 1 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll b/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-vector-float.ll @@ -15,12 +15,14 @@ ; RV64-NEXT: fadd.s ft0, ft1, ft0 ; RV64-NEXT: fmv.x.w a0, ft0 ; RV64-NEXT: fmv.x.w a1, ft2 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV64LP64F-LABEL: callee_v2f32: ; RV64LP64F: # %bb.0: ; RV64LP64F-NEXT: fadd.s fa0, fa0, fa2 ; RV64LP64F-NEXT: fadd.s fa1, fa1, fa3 +; RV64LP64F-NEXT: .cfi_def_cfa_offset 0 ; RV64LP64F-NEXT: ret %z = fadd <2 x 
float> %x, %y ret <2 x float> %z @@ -45,6 +47,7 @@ ; RV64-NEXT: fsw ft1, 8(a0) ; RV64-NEXT: fsw ft3, 4(a0) ; RV64-NEXT: fsw ft5, 0(a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV64LP64F-LABEL: callee_v4f32: @@ -57,6 +60,7 @@ ; RV64LP64F-NEXT: fsw ft2, 8(a0) ; RV64LP64F-NEXT: fsw ft1, 4(a0) ; RV64LP64F-NEXT: fsw ft0, 0(a0) +; RV64LP64F-NEXT: .cfi_def_cfa_offset 0 ; RV64LP64F-NEXT: ret %z = fadd <4 x float> %x, %y ret <4 x float> %z diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -144,6 +144,7 @@ ; RV32IFD-NEXT: mv a0, a1 ; RV32IFD-NEXT: .LBB5_2: ; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: .cfi_def_cfa_offset 0 ; RV32IFD-NEXT: ret ; ; RV64IFD-LABEL: fcvt_wu_d_multiple_use: @@ -155,6 +156,7 @@ ; RV64IFD-NEXT: # %bb.1: ; RV64IFD-NEXT: mv a0, a1 ; RV64IFD-NEXT: .LBB5_2: +; RV64IFD-NEXT: .cfi_def_cfa_offset 0 ; RV64IFD-NEXT: ret %a = fptoui double %x to i32 %b = icmp eq i32 %a, 0 @@ -639,6 +641,7 @@ ; RV32IFD-NEXT: addi a0, a0, 1 ; RV32IFD-NEXT: fcvt.d.w ft0, a0 ; RV32IFD-NEXT: fsd ft0, 0(a1) +; RV32IFD-NEXT: .cfi_def_cfa_offset 0 ; RV32IFD-NEXT: ret ; ; RV64IFD-LABEL: fcvt_d_w_demanded_bits: @@ -646,6 +649,7 @@ ; RV64IFD-NEXT: addiw a0, a0, 1 ; RV64IFD-NEXT: fcvt.d.w ft0, a0 ; RV64IFD-NEXT: fsd ft0, 0(a1) +; RV64IFD-NEXT: .cfi_def_cfa_offset 0 ; RV64IFD-NEXT: ret %3 = add i32 %0, 1 %4 = sitofp i32 %3 to double @@ -660,6 +664,7 @@ ; RV32IFD-NEXT: addi a0, a0, 1 ; RV32IFD-NEXT: fcvt.d.wu ft0, a0 ; RV32IFD-NEXT: fsd ft0, 0(a1) +; RV32IFD-NEXT: .cfi_def_cfa_offset 0 ; RV32IFD-NEXT: ret ; ; RV64IFD-LABEL: fcvt_d_wu_demanded_bits: @@ -667,6 +672,7 @@ ; RV64IFD-NEXT: addiw a0, a0, 1 ; RV64IFD-NEXT: fcvt.d.wu ft0, a0 ; RV64IFD-NEXT: fsd ft0, 0(a1) +; RV64IFD-NEXT: .cfi_def_cfa_offset 0 ; RV64IFD-NEXT: ret %3 = add i32 %0, 1 %4 = uitofp i32 %3 to double diff --git a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll --- a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll +++ b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll @@ -40,7 +40,11 @@ ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 +; RV32I-NEXT: .cfi_restore s1 ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB0_4: # %lpad ; RV32I-NEXT: .Ltmp4: @@ -77,7 +81,11 @@ ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 +; RV64I-NEXT: .cfi_restore s1 ; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB0_4: # %lpad ; RV64I-NEXT: .Ltmp4: @@ -111,10 +119,12 @@ define internal void @callee(i1* %p) { ; RV32I-LABEL: callee: ; RV32I: # %bb.0: +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: callee: ; RV64I: # %bb.0: +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ret void } diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -82,6 +82,7 @@ ; RV32IF-NEXT: # %bb.1: ; RV32IF-NEXT: mv a0, a1 ; RV32IF-NEXT: .LBB3_2: +; RV32IF-NEXT: 
.cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcvt_wu_s_multiple_use: @@ -93,6 +94,7 @@ ; RV64IF-NEXT: # %bb.1: ; RV64IF-NEXT: mv a0, a1 ; RV64IF-NEXT: .LBB3_2: +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret %a = fptoui float %x to i32 %b = icmp eq i32 %a, 0 @@ -526,6 +528,7 @@ ; RV32IF-NEXT: addi a0, a0, 1 ; RV32IF-NEXT: fcvt.s.w ft0, a0 ; RV32IF-NEXT: fsw ft0, 0(a1) +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcvt_s_w_demanded_bits: @@ -533,6 +536,7 @@ ; RV64IF-NEXT: addiw a0, a0, 1 ; RV64IF-NEXT: fcvt.s.w ft0, a0 ; RV64IF-NEXT: fsw ft0, 0(a1) +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret %3 = add i32 %0, 1 %4 = sitofp i32 %3 to float @@ -547,6 +551,7 @@ ; RV32IF-NEXT: addi a0, a0, 1 ; RV32IF-NEXT: fcvt.s.wu ft0, a0 ; RV32IF-NEXT: fsw ft0, 0(a1) +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcvt_s_wu_demanded_bits: @@ -554,6 +559,7 @@ ; RV64IF-NEXT: addiw a0, a0, 1 ; RV64IF-NEXT: fcvt.s.wu ft0, a0 ; RV64IF-NEXT: fsw ft0, 0(a1) +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret %3 = add i32 %0, 1 %4 = uitofp i32 %3 to float diff --git a/llvm/test/CodeGen/RISCV/fpenv.ll b/llvm/test/CodeGen/RISCV/fpenv.ll --- a/llvm/test/CodeGen/RISCV/fpenv.ll +++ b/llvm/test/CodeGen/RISCV/fpenv.ll @@ -11,6 +11,7 @@ ; RV32IF-NEXT: addi a1, a1, 769 ; RV32IF-NEXT: srl a0, a1, a0 ; RV32IF-NEXT: andi a0, a0, 7 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_01: @@ -21,6 +22,7 @@ ; RV64IF-NEXT: addiw a1, a1, 769 ; RV64IF-NEXT: srl a0, a1, a0 ; RV64IF-NEXT: andi a0, a0, 7 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret %rm = call i32 @llvm.flt.rounds() ret i32 %rm @@ -35,6 +37,7 @@ ; RV32IF-NEXT: srl a0, a1, a0 ; RV32IF-NEXT: andi a0, a0, 7 ; RV32IF-NEXT: fsrm a0 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_02: @@ -46,6 +49,7 @@ ; RV64IF-NEXT: srl a0, a1, a0 ; RV64IF-NEXT: andi a0, a0, 7 ; RV64IF-NEXT: fsrm a0 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 %rm) ret void @@ -55,11 +59,13 @@ ; RV32IF-LABEL: func_03: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fsrmi 1 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_03: ; RV64IF: # %bb.0: ; RV64IF-NEXT: fsrmi 1 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 0) ret void @@ -69,11 +75,13 @@ ; RV32IF-LABEL: func_04: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fsrmi 0 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_04: ; RV64IF: # %bb.0: ; RV64IF-NEXT: fsrmi 0 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 1) ret void @@ -83,11 +91,13 @@ ; RV32IF-LABEL: func_05: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fsrmi 3 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_05: ; RV64IF: # %bb.0: ; RV64IF-NEXT: fsrmi 3 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 2) ret void @@ -97,11 +107,13 @@ ; RV32IF-LABEL: func_06: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fsrmi 2 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_06: ; RV64IF: # %bb.0: ; RV64IF-NEXT: fsrmi 2 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 3) ret void @@ -111,11 +123,13 @@ ; RV32IF-LABEL: func_07: ; RV32IF: # %bb.0: ; RV32IF-NEXT: fsrmi 4 +; RV32IF-NEXT: .cfi_def_cfa_offset 0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: func_07: ; RV64IF: # %bb.0: ; 
RV64IF-NEXT: fsrmi 4 +; RV64IF-NEXT: .cfi_def_cfa_offset 0 ; RV64IF-NEXT: ret call void @llvm.set.rounding(i32 4) ret void diff --git a/llvm/test/CodeGen/RISCV/frame-info.ll b/llvm/test/CodeGen/RISCV/frame-info.ll --- a/llvm/test/CodeGen/RISCV/frame-info.ll +++ b/llvm/test/CodeGen/RISCV/frame-info.ll @@ -11,10 +11,12 @@ define void @trivial() { ; RV32-LABEL: trivial: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: trivial: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32-WITHFP-LABEL: trivial: @@ -28,8 +30,12 @@ ; RV32-WITHFP-NEXT: addi s0, sp, 16 ; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_def_cfa sp, 16 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_restore ra +; RV32-WITHFP-NEXT: .cfi_restore s0 ; RV32-WITHFP-NEXT: addi sp, sp, 16 +; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV32-WITHFP-NEXT: ret ; ; RV64-WITHFP-LABEL: trivial: @@ -43,8 +49,12 @@ ; RV64-WITHFP-NEXT: addi s0, sp, 16 ; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 0 ; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64-WITHFP-NEXT: .cfi_def_cfa sp, 16 ; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-WITHFP-NEXT: .cfi_restore ra +; RV64-WITHFP-NEXT: .cfi_restore s0 ; RV64-WITHFP-NEXT: addi sp, sp, 16 +; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV64-WITHFP-NEXT: ret ret void } @@ -67,8 +77,12 @@ ; RV32-NEXT: call callee_with_args@plt ; RV32-NEXT: addi sp, s0, -16 ; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: .cfi_restore s0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: stack_alloc: @@ -90,8 +104,12 @@ ; RV64-NEXT: call callee_with_args@plt ; RV64-NEXT: addi sp, s0, -16 ; RV64-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra +; RV64-NEXT: .cfi_restore s0 ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32-WITHFP-LABEL: stack_alloc: @@ -111,8 +129,12 @@ ; RV32-WITHFP-NEXT: call callee_with_args@plt ; RV32-WITHFP-NEXT: addi sp, s0, -16 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_def_cfa sp, 16 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_restore ra +; RV32-WITHFP-NEXT: .cfi_restore s0 ; RV32-WITHFP-NEXT: addi sp, sp, 16 +; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV32-WITHFP-NEXT: ret ; ; RV64-WITHFP-LABEL: stack_alloc: @@ -134,8 +156,12 @@ ; RV64-WITHFP-NEXT: call callee_with_args@plt ; RV64-WITHFP-NEXT: addi sp, s0, -16 ; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64-WITHFP-NEXT: .cfi_def_cfa sp, 16 ; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-WITHFP-NEXT: .cfi_restore ra +; RV64-WITHFP-NEXT: .cfi_restore s0 ; RV64-WITHFP-NEXT: addi sp, sp, 16 +; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV64-WITHFP-NEXT: ret entry: %0 = alloca i8, i32 %size, align 16 @@ -157,7 +183,9 @@ ; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: call callee2@plt ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: branch_and_tail_call: @@ -173,7 +201,9 @@ ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call 
callee2@plt ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32-WITHFP-LABEL: branch_and_tail_call: @@ -193,8 +223,12 @@ ; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0 ; RV32-WITHFP-NEXT: call callee2@plt ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_def_cfa sp, 16 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-WITHFP-NEXT: .cfi_restore ra +; RV32-WITHFP-NEXT: .cfi_restore s0 ; RV32-WITHFP-NEXT: addi sp, sp, 16 +; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV32-WITHFP-NEXT: ret ; ; RV64-WITHFP-LABEL: branch_and_tail_call: @@ -214,8 +248,12 @@ ; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 0 ; RV64-WITHFP-NEXT: call callee2@plt ; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64-WITHFP-NEXT: .cfi_def_cfa sp, 16 ; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-WITHFP-NEXT: .cfi_restore ra +; RV64-WITHFP-NEXT: .cfi_restore s0 ; RV64-WITHFP-NEXT: addi sp, sp, 16 +; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV64-WITHFP-NEXT: ret br i1 %a, label %blue_pill, label %red_pill blue_pill: diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -146,6 +146,7 @@ ; RV32IZFH-NEXT: # %bb.1: ; RV32IZFH-NEXT: mv a0, a1 ; RV32IZFH-NEXT: .LBB3_2: +; RV32IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IZFH-NEXT: ret ; ; RV32IDZFH-LABEL: fcvt_ui_h_multiple_use: @@ -156,6 +157,7 @@ ; RV32IDZFH-NEXT: # %bb.1: ; RV32IDZFH-NEXT: mv a0, a1 ; RV32IDZFH-NEXT: .LBB3_2: +; RV32IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IDZFH-NEXT: ret ; ; RV64IZFH-LABEL: fcvt_ui_h_multiple_use: @@ -166,6 +168,7 @@ ; RV64IZFH-NEXT: # %bb.1: ; RV64IZFH-NEXT: mv a0, a1 ; RV64IZFH-NEXT: .LBB3_2: +; RV64IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_ui_h_multiple_use: @@ -176,6 +179,7 @@ ; RV64IDZFH-NEXT: # %bb.1: ; RV64IDZFH-NEXT: mv a0, a1 ; RV64IDZFH-NEXT: .LBB3_2: +; RV64IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IDZFH-NEXT: ret %a = fptoui half %x to i32 %b = icmp eq i32 %a, 0 @@ -1154,6 +1158,7 @@ ; RV32IZFH-NEXT: addi a0, a0, 1 ; RV32IZFH-NEXT: fcvt.h.w ft0, a0 ; RV32IZFH-NEXT: fsh ft0, 0(a1) +; RV32IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IZFH-NEXT: ret ; ; RV32IDZFH-LABEL: fcvt_h_w_demanded_bits: @@ -1161,6 +1166,7 @@ ; RV32IDZFH-NEXT: addi a0, a0, 1 ; RV32IDZFH-NEXT: fcvt.h.w ft0, a0 ; RV32IDZFH-NEXT: fsh ft0, 0(a1) +; RV32IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IDZFH-NEXT: ret ; ; RV64IZFH-LABEL: fcvt_h_w_demanded_bits: @@ -1168,6 +1174,7 @@ ; RV64IZFH-NEXT: addiw a0, a0, 1 ; RV64IZFH-NEXT: fcvt.h.w ft0, a0 ; RV64IZFH-NEXT: fsh ft0, 0(a1) +; RV64IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_h_w_demanded_bits: @@ -1175,6 +1182,7 @@ ; RV64IDZFH-NEXT: addiw a0, a0, 1 ; RV64IDZFH-NEXT: fcvt.h.w ft0, a0 ; RV64IDZFH-NEXT: fsh ft0, 0(a1) +; RV64IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IDZFH-NEXT: ret %3 = add i32 %0, 1 %4 = sitofp i32 %3 to half @@ -1189,6 +1197,7 @@ ; RV32IZFH-NEXT: addi a0, a0, 1 ; RV32IZFH-NEXT: fcvt.h.wu ft0, a0 ; RV32IZFH-NEXT: fsh ft0, 0(a1) +; RV32IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IZFH-NEXT: ret ; ; RV32IDZFH-LABEL: fcvt_h_wu_demanded_bits: @@ -1196,6 +1205,7 @@ ; RV32IDZFH-NEXT: addi a0, a0, 1 ; RV32IDZFH-NEXT: fcvt.h.wu ft0, a0 ; RV32IDZFH-NEXT: fsh ft0, 0(a1) +; RV32IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV32IDZFH-NEXT: ret ; ; RV64IZFH-LABEL:
fcvt_h_wu_demanded_bits: @@ -1203,6 +1213,7 @@ ; RV64IZFH-NEXT: addiw a0, a0, 1 ; RV64IZFH-NEXT: fcvt.h.wu ft0, a0 ; RV64IZFH-NEXT: fsh ft0, 0(a1) +; RV64IZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_h_wu_demanded_bits: @@ -1210,6 +1221,7 @@ ; RV64IDZFH-NEXT: addiw a0, a0, 1 ; RV64IDZFH-NEXT: fcvt.h.wu ft0, a0 ; RV64IDZFH-NEXT: fsh ft0, 0(a1) +; RV64IDZFH-NEXT: .cfi_def_cfa_offset 0 ; RV64IDZFH-NEXT: ret %3 = add i32 %0, 1 %4 = uitofp i32 %3 to half diff --git a/llvm/test/CodeGen/RISCV/imm.ll b/llvm/test/CodeGen/RISCV/imm.ll --- a/llvm/test/CodeGen/RISCV/imm.ll +++ b/llvm/test/CodeGen/RISCV/imm.ll @@ -915,6 +915,7 @@ ; RV32I-NEXT: lui a0, 263018 ; RV32I-NEXT: addi a0, a0, -795 ; RV32I-NEXT: addi a1, zero, 1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_5372288229: @@ -923,6 +924,7 @@ ; RV64I-NEXT: addiw a0, a0, 437 ; RV64I-NEXT: slli a0, a0, 13 ; RV64I-NEXT: addi a0, a0, -795 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_5372288229: @@ -930,6 +932,7 @@ ; RV64IZBA-NEXT: lui a0, 655797 ; RV64IZBA-NEXT: slli.uw a0, a0, 1 ; RV64IZBA-NEXT: addi a0, a0, -795 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_5372288229: @@ -937,6 +940,7 @@ ; RV64IZBS-NEXT: lui a0, 263018 ; RV64IZBS-NEXT: addiw a0, a0, -795 ; RV64IZBS-NEXT: bseti a0, a0, 32 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 5372288229 } @@ -947,6 +951,7 @@ ; RV32I-NEXT: lui a0, 785558 ; RV32I-NEXT: addi a0, a0, 795 ; RV32I-NEXT: addi a1, zero, -2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_5372288229: @@ -955,6 +960,7 @@ ; RV64I-NEXT: addiw a0, a0, -437 ; RV64I-NEXT: slli a0, a0, 13 ; RV64I-NEXT: addi a0, a0, 795 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_neg_5372288229: @@ -962,6 +968,7 @@ ; RV64IZBA-NEXT: lui a0, 611378 ; RV64IZBA-NEXT: addiw a0, a0, 265 ; RV64IZBA-NEXT: sh1add a0, a0, a0 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_neg_5372288229: @@ -969,6 +976,7 @@ ; RV64IZBS-NEXT: lui a0, 785558 ; RV64IZBS-NEXT: addiw a0, a0, 795 ; RV64IZBS-NEXT: bclri a0, a0, 32 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 -5372288229 } @@ -979,6 +987,7 @@ ; RV32I-NEXT: lui a0, 88838 ; RV32I-NEXT: addi a0, a0, -1325 ; RV32I-NEXT: addi a1, zero, 2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_8953813715: @@ -987,6 +996,7 @@ ; RV64I-NEXT: addiw a0, a0, -637 ; RV64I-NEXT: slli a0, a0, 13 ; RV64I-NEXT: addi a0, a0, -1325 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_8953813715: @@ -994,6 +1004,7 @@ ; RV64IZBA-NEXT: lui a0, 437198 ; RV64IZBA-NEXT: addiw a0, a0, -265 ; RV64IZBA-NEXT: sh2add a0, a0, a0 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_8953813715: @@ -1001,6 +1012,7 @@ ; RV64IZBS-NEXT: lui a0, 88838 ; RV64IZBS-NEXT: addiw a0, a0, -1325 ; RV64IZBS-NEXT: bseti a0, a0, 33 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 8953813715 } @@ -1011,6 +1023,7 @@ ; RV32I-NEXT: lui a0, 959738 ; RV32I-NEXT: addi a0, a0, 1325 ; RV32I-NEXT: addi a1, zero, -3 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_8953813715: @@ -1019,6 +1032,7 @@ ; RV64I-NEXT: addiw a0, a0, 637 ; RV64I-NEXT: slli a0, a0, 13 ; RV64I-NEXT: addi a0, a0, 1325 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: 
imm_neg_8953813715: @@ -1026,6 +1040,7 @@ ; RV64IZBA-NEXT: lui a0, 611378 ; RV64IZBA-NEXT: addiw a0, a0, 265 ; RV64IZBA-NEXT: sh2add a0, a0, a0 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_neg_8953813715: @@ -1033,6 +1048,7 @@ ; RV64IZBS-NEXT: lui a0, 959738 ; RV64IZBS-NEXT: addiw a0, a0, 1325 ; RV64IZBS-NEXT: bclri a0, a0, 33 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 -8953813715 } @@ -1043,6 +1059,7 @@ ; RV32I-NEXT: lui a0, 789053 ; RV32I-NEXT: addi a0, a0, 1711 ; RV32I-NEXT: addi a1, zero, 3 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_16116864687: @@ -1051,6 +1068,7 @@ ; RV64I-NEXT: addiw a0, a0, -1475 ; RV64I-NEXT: slli a0, a0, 12 ; RV64I-NEXT: addi a0, a0, 1711 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_16116864687: @@ -1058,6 +1076,7 @@ ; RV64IZBA-NEXT: lui a0, 437198 ; RV64IZBA-NEXT: addiw a0, a0, -265 ; RV64IZBA-NEXT: sh3add a0, a0, a0 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_16116864687: @@ -1066,6 +1085,7 @@ ; RV64IZBS-NEXT: addiw a0, a0, -1475 ; RV64IZBS-NEXT: slli a0, a0, 12 ; RV64IZBS-NEXT: addi a0, a0, 1711 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 16116864687 } @@ -1076,6 +1096,7 @@ ; RV32I-NEXT: lui a0, 259523 ; RV32I-NEXT: addi a0, a0, -1711 ; RV32I-NEXT: addi a1, zero, -4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_16116864687: @@ -1084,6 +1105,7 @@ ; RV64I-NEXT: addiw a0, a0, 1475 ; RV64I-NEXT: slli a0, a0, 12 ; RV64I-NEXT: addi a0, a0, -1711 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_neg_16116864687: @@ -1091,6 +1113,7 @@ ; RV64IZBA-NEXT: lui a0, 611378 ; RV64IZBA-NEXT: addiw a0, a0, 265 ; RV64IZBA-NEXT: sh3add a0, a0, a0 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_neg_16116864687: @@ -1099,6 +1122,7 @@ ; RV64IZBS-NEXT: addiw a0, a0, 1475 ; RV64IZBS-NEXT: slli a0, a0, 12 ; RV64IZBS-NEXT: addi a0, a0, -1711 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 -16116864687 } @@ -1109,6 +1133,7 @@ ; RV32I-NEXT: lui a0, 572348 ; RV32I-NEXT: addi a0, a0, -1093 ; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_2344336315: @@ -1116,6 +1141,7 @@ ; RV64I-NEXT: lui a0, 143087 ; RV64I-NEXT: slli a0, a0, 2 ; RV64I-NEXT: addi a0, a0, -1093 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_2344336315: @@ -1123,6 +1149,7 @@ ; RV64IZBA-NEXT: lui a0, 143087 ; RV64IZBA-NEXT: slli a0, a0, 2 ; RV64IZBA-NEXT: addi a0, a0, -1093 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_2344336315: @@ -1130,6 +1157,7 @@ ; RV64IZBS-NEXT: lui a0, 143087 ; RV64IZBS-NEXT: slli a0, a0, 2 ; RV64IZBS-NEXT: addi a0, a0, -1093 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 2344336315 ; 0x8bbbbbbb } @@ -1140,6 +1168,7 @@ ; RV32I-NEXT: lui a0, 506812 ; RV32I-NEXT: addi a0, a0, -1093 ; RV32I-NEXT: lui a1, 4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_70370820078523: @@ -1150,6 +1179,7 @@ ; RV64I-NEXT: addi a0, a0, -273 ; RV64I-NEXT: slli a0, a0, 14 ; RV64I-NEXT: addi a0, a0, -1093 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_70370820078523: @@ -1160,6 +1190,7 @@ ; RV64IZBA-NEXT: addi a0, a0, -273 ; RV64IZBA-NEXT: slli a0, a0, 14 ; RV64IZBA-NEXT: addi a0, a0, -1093 +; RV64IZBA-NEXT: 
.cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_70370820078523: @@ -1167,6 +1198,7 @@ ; RV64IZBS-NEXT: lui a0, 506812 ; RV64IZBS-NEXT: addiw a0, a0, -1093 ; RV64IZBS-NEXT: bseti a0, a0, 46 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 70370820078523 ; 0x40007bbbbbbb } @@ -1177,6 +1209,7 @@ ; RV32I-NEXT: lui a0, 506812 ; RV32I-NEXT: addi a0, a0, -1093 ; RV32I-NEXT: lui a1, 524288 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_9223372034778874949: @@ -1188,6 +1221,7 @@ ; RV64I-NEXT: addi a0, a0, -273 ; RV64I-NEXT: slli a0, a0, 14 ; RV64I-NEXT: addi a0, a0, -1093 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_neg_9223372034778874949: @@ -1199,6 +1233,7 @@ ; RV64IZBA-NEXT: addi a0, a0, -273 ; RV64IZBA-NEXT: slli a0, a0, 14 ; RV64IZBA-NEXT: addi a0, a0, -1093 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_neg_9223372034778874949: @@ -1206,6 +1241,7 @@ ; RV64IZBS-NEXT: lui a0, 506812 ; RV64IZBS-NEXT: addiw a0, a0, -1093 ; RV64IZBS-NEXT: bseti a0, a0, 63 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 -9223372034778874949 ; 0x800000007bbbbbbb } @@ -1216,6 +1252,7 @@ ; RV32I-NEXT: lui a0, 506812 ; RV32I-NEXT: addi a0, a0, -1093 ; RV32I-NEXT: lui a1, 524292 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_9223301666034697285: @@ -1227,6 +1264,7 @@ ; RV64I-NEXT: addi a0, a0, -273 ; RV64I-NEXT: slli a0, a0, 14 ; RV64I-NEXT: addi a0, a0, -1093 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_neg_9223301666034697285: @@ -1238,6 +1276,7 @@ ; RV64IZBA-NEXT: addi a0, a0, -273 ; RV64IZBA-NEXT: slli a0, a0, 14 ; RV64IZBA-NEXT: addi a0, a0, -1093 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_neg_9223301666034697285: @@ -1246,6 +1285,7 @@ ; RV64IZBS-NEXT: addiw a0, a0, -1093 ; RV64IZBS-NEXT: bseti a0, a0, 46 ; RV64IZBS-NEXT: bseti a0, a0, 63 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 -9223301666034697285 ; 0x800040007bbbbbbb } @@ -1256,6 +1296,7 @@ ; RV32I-NEXT: lui a0, 506812 ; RV32I-NEXT: addi a0, a0, -1093 ; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_2219066437: @@ -1263,6 +1304,7 @@ ; RV64I-NEXT: lui a0, 913135 ; RV64I-NEXT: slli a0, a0, 2 ; RV64I-NEXT: addi a0, a0, -1093 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_neg_2219066437: @@ -1270,6 +1312,7 @@ ; RV64IZBA-NEXT: lui a0, 913135 ; RV64IZBA-NEXT: slli a0, a0, 2 ; RV64IZBA-NEXT: addi a0, a0, -1093 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_neg_2219066437: @@ -1277,6 +1320,7 @@ ; RV64IZBS-NEXT: lui a0, 913135 ; RV64IZBS-NEXT: slli a0, a0, 2 ; RV64IZBS-NEXT: addi a0, a0, -1093 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 -2219066437 ; 0xffffffff7bbbbbbb } @@ -1288,6 +1332,7 @@ ; RV32I-NEXT: addi a0, a0, -1093 ; RV32I-NEXT: lui a1, 1048575 ; RV32I-NEXT: addi a1, a1, 2047 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_8798043653189: @@ -1296,6 +1341,7 @@ ; RV64I-NEXT: addiw a0, a0, -273 ; RV64I-NEXT: slli a0, a0, 14 ; RV64I-NEXT: addi a0, a0, -1093 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_neg_8798043653189: @@ -1304,6 +1350,7 @@ ; RV64IZBA-NEXT: addiw a0, a0, -273 ; RV64IZBA-NEXT: slli a0, a0, 14 ; RV64IZBA-NEXT: addi a0, a0, -1093 +; 
RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_neg_8798043653189: @@ -1311,6 +1358,7 @@ ; RV64IZBS-NEXT: lui a0, 572348 ; RV64IZBS-NEXT: addiw a0, a0, -1093 ; RV64IZBS-NEXT: bclri a0, a0, 43 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 -8798043653189 ; 0xfffff7ff8bbbbbbb } @@ -1322,6 +1370,7 @@ ; RV32I-NEXT: addi a0, a0, -1093 ; RV32I-NEXT: lui a1, 524288 ; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_9223372034904144827: @@ -1331,6 +1380,7 @@ ; RV64I-NEXT: slli a0, a0, 12 ; RV64I-NEXT: addi a0, a0, 1911 ; RV64I-NEXT: srli a0, a0, 1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_9223372034904144827: @@ -1340,6 +1390,7 @@ ; RV64IZBA-NEXT: slli a0, a0, 12 ; RV64IZBA-NEXT: addi a0, a0, 1911 ; RV64IZBA-NEXT: srli a0, a0, 1 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_9223372034904144827: @@ -1347,6 +1398,7 @@ ; RV64IZBS-NEXT: lui a0, 572348 ; RV64IZBS-NEXT: addiw a0, a0, -1093 ; RV64IZBS-NEXT: bclri a0, a0, 63 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 9223372034904144827 ; 0x7fffffff8bbbbbbb } @@ -1358,6 +1410,7 @@ ; RV32I-NEXT: addi a0, a0, -1093 ; RV32I-NEXT: lui a1, 524287 ; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_9223354442718100411: @@ -1369,6 +1422,7 @@ ; RV64I-NEXT: addi a0, a0, -273 ; RV64I-NEXT: slli a0, a0, 14 ; RV64I-NEXT: addi a0, a0, -1093 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_neg_9223354442718100411: @@ -1380,6 +1434,7 @@ ; RV64IZBA-NEXT: addi a0, a0, -273 ; RV64IZBA-NEXT: slli a0, a0, 14 ; RV64IZBA-NEXT: addi a0, a0, -1093 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_neg_9223354442718100411: @@ -1388,6 +1443,7 @@ ; RV64IZBS-NEXT: addiw a0, a0, -1093 ; RV64IZBS-NEXT: bclri a0, a0, 44 ; RV64IZBS-NEXT: bclri a0, a0, 63 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 9223354442718100411 ; 0x7fffefff8bbbbbbb } @@ -1398,6 +1454,7 @@ ; RV32I-NEXT: lui a0, 699051 ; RV32I-NEXT: addi a0, a0, -1366 ; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_2863311530: @@ -1406,6 +1463,7 @@ ; RV64I-NEXT: addiw a0, a0, -1365 ; RV64I-NEXT: slli a0, a0, 12 ; RV64I-NEXT: addi a0, a0, -1366 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_2863311530: @@ -1413,6 +1471,7 @@ ; RV64IZBA-NEXT: lui a0, 699051 ; RV64IZBA-NEXT: addiw a0, a0, -1366 ; RV64IZBA-NEXT: zext.w a0, a0 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_2863311530: @@ -1420,6 +1479,7 @@ ; RV64IZBS-NEXT: lui a0, 174763 ; RV64IZBS-NEXT: addiw a0, a0, -1366 ; RV64IZBS-NEXT: bseti a0, a0, 31 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 2863311530 ; #0xaaaaaaaa } @@ -1430,6 +1490,7 @@ ; RV32I-NEXT: lui a0, 349525 ; RV32I-NEXT: addi a0, a0, 1366 ; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_2863311530: @@ -1438,6 +1499,7 @@ ; RV64I-NEXT: addiw a0, a0, 1365 ; RV64I-NEXT: slli a0, a0, 12 ; RV64I-NEXT: addi a0, a0, 1366 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_neg_2863311530: @@ -1445,6 +1507,7 @@ ; RV64IZBA-NEXT: lui a0, 908766 ; RV64IZBA-NEXT: addiw a0, a0, -546 ; RV64IZBA-NEXT: sh2add a0, a0, a0 +; RV64IZBA-NEXT: 
.cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_neg_2863311530: @@ -1452,6 +1515,7 @@ ; RV64IZBS-NEXT: lui a0, 873813 ; RV64IZBS-NEXT: addiw a0, a0, 1366 ; RV64IZBS-NEXT: bclri a0, a0, 31 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 -2863311530 ; #0xffffffff55555556 } @@ -1462,6 +1526,7 @@ ; RV32I-NEXT: lui a0, 524288 ; RV32I-NEXT: addi a0, a0, 1365 ; RV32I-NEXT: mv a1, zero +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_2147486378: @@ -1469,6 +1534,7 @@ ; RV64I-NEXT: addi a0, zero, 1 ; RV64I-NEXT: slli a0, a0, 31 ; RV64I-NEXT: addi a0, a0, 1365 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_2147486378: @@ -1476,12 +1542,14 @@ ; RV64IZBA-NEXT: addi a0, zero, 1 ; RV64IZBA-NEXT: slli a0, a0, 31 ; RV64IZBA-NEXT: addi a0, a0, 1365 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_2147486378: ; RV64IZBS: # %bb.0: ; RV64IZBS-NEXT: addi a0, zero, 1365 ; RV64IZBS-NEXT: bseti a0, a0, 31 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 2147485013 } @@ -1492,6 +1560,7 @@ ; RV32I-NEXT: lui a0, 524288 ; RV32I-NEXT: addi a0, a0, -1365 ; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_neg_2147485013: @@ -1499,6 +1568,7 @@ ; RV64I-NEXT: addi a0, zero, -1 ; RV64I-NEXT: slli a0, a0, 31 ; RV64I-NEXT: addi a0, a0, -1365 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_neg_2147485013: @@ -1506,12 +1576,14 @@ ; RV64IZBA-NEXT: addi a0, zero, -1 ; RV64IZBA-NEXT: slli a0, a0, 31 ; RV64IZBA-NEXT: addi a0, a0, -1365 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_neg_2147485013: ; RV64IZBS: # %bb.0: ; RV64IZBS-NEXT: addi a0, zero, -1365 ; RV64IZBS-NEXT: bclri a0, a0, 31 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 -2147485013 } @@ -1523,6 +1595,7 @@ ; RV32I-NEXT: addi a0, a0, 1979 ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, -1093 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_12900924131259: @@ -1531,6 +1604,7 @@ ; RV64I-NEXT: addiw a0, a0, -1093 ; RV64I-NEXT: slli a0, a0, 24 ; RV64I-NEXT: addi a0, a0, 1979 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_12900924131259: @@ -1538,6 +1612,7 @@ ; RV64IZBA-NEXT: lui a0, 768955 ; RV64IZBA-NEXT: slli.uw a0, a0, 12 ; RV64IZBA-NEXT: addi a0, a0, 1979 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_12900924131259: @@ -1546,6 +1621,7 @@ ; RV64IZBS-NEXT: addiw a0, a0, -1093 ; RV64IZBS-NEXT: slli a0, a0, 24 ; RV64IZBS-NEXT: addi a0, a0, 1979 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 12900924131259 } @@ -1555,6 +1631,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lui a0, 768944 ; RV32I-NEXT: addi a1, zero, 11 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_50394234880: @@ -1562,12 +1639,14 @@ ; RV64I-NEXT: lui a0, 188 ; RV64I-NEXT: addiw a0, a0, -1093 ; RV64I-NEXT: slli a0, a0, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_50394234880: ; RV64IZBA: # %bb.0: ; RV64IZBA-NEXT: lui a0, 768955 ; RV64IZBA-NEXT: slli.uw a0, a0, 4 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_50394234880: @@ -1575,6 +1654,7 @@ ; RV64IZBS-NEXT: lui a0, 188 ; RV64IZBS-NEXT: addiw a0, a0, -1093 ; RV64IZBS-NEXT: slli a0, a0, 16 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: 
ret ret i64 50394234880 } @@ -1586,6 +1666,7 @@ ; RV32I-NEXT: addi a0, a0, 1911 ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, -1093 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_12900936431479: @@ -1595,6 +1676,7 @@ ; RV64I-NEXT: addi a0, a0, -1093 ; RV64I-NEXT: slli a0, a0, 12 ; RV64I-NEXT: addi a0, a0, 1911 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_12900936431479: @@ -1603,6 +1685,7 @@ ; RV64IZBA-NEXT: addiw a0, a0, -1093 ; RV64IZBA-NEXT: slli.uw a0, a0, 12 ; RV64IZBA-NEXT: addi a0, a0, 1911 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_12900936431479: @@ -1612,6 +1695,7 @@ ; RV64IZBS-NEXT: addi a0, a0, -1093 ; RV64IZBS-NEXT: slli a0, a0, 12 ; RV64IZBS-NEXT: addi a0, a0, 1911 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 12900936431479 } @@ -1623,6 +1707,7 @@ ; RV32I-NEXT: addi a0, a0, -1366 ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, -1093 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_12900918536874: @@ -1633,6 +1718,7 @@ ; RV64I-NEXT: addi a0, a0, -1365 ; RV64I-NEXT: slli a0, a0, 12 ; RV64I-NEXT: addi a0, a0, -1366 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_12900918536874: @@ -1641,6 +1727,7 @@ ; RV64IZBA-NEXT: addiw a0, a0, -1365 ; RV64IZBA-NEXT: slli.uw a0, a0, 12 ; RV64IZBA-NEXT: addi a0, a0, -1366 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_12900918536874: @@ -1651,6 +1738,7 @@ ; RV64IZBS-NEXT: addi a0, a0, -1365 ; RV64IZBS-NEXT: slli a0, a0, 12 ; RV64IZBS-NEXT: addi a0, a0, -1366 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 12900918536874 } @@ -1662,6 +1750,7 @@ ; RV32I-NEXT: addi a0, a0, 273 ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, -1093 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm_12900925247761: @@ -1672,6 +1761,7 @@ ; RV64I-NEXT: addi a0, a0, 273 ; RV64I-NEXT: slli a0, a0, 12 ; RV64I-NEXT: addi a0, a0, 273 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IZBA-LABEL: imm_12900925247761: @@ -1680,6 +1770,7 @@ ; RV64IZBA-NEXT: addiw a0, a0, 273 ; RV64IZBA-NEXT: slli.uw a0, a0, 12 ; RV64IZBA-NEXT: addi a0, a0, 273 +; RV64IZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBA-NEXT: ret ; ; RV64IZBS-LABEL: imm_12900925247761: @@ -1690,6 +1781,7 @@ ; RV64IZBS-NEXT: addi a0, a0, 273 ; RV64IZBS-NEXT: slli a0, a0, 12 ; RV64IZBS-NEXT: addi a0, a0, 273 +; RV64IZBS-NEXT: .cfi_def_cfa_offset 0 ; RV64IZBS-NEXT: ret ret i64 12900925247761 } diff --git a/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll b/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll --- a/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll +++ b/llvm/test/CodeGen/RISCV/inline-asm-S-constraint.ll @@ -11,6 +11,7 @@ ; RV32-NEXT: lui a0, %hi(var) ; RV32-NEXT: addi a0, a0, %lo(var) ; RV32-NEXT: #NO_APP +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: constraint_S: @@ -19,6 +20,7 @@ ; RV64-NEXT: lui a0, %hi(var) ; RV64-NEXT: addi a0, a0, %lo(var) ; RV64-NEXT: #NO_APP +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ret = tail call i8* asm "lui $0, %hi($1)\0Aaddi $0, $0, %lo($1)", "=r,S"(i32* nonnull @var) ret i8* %ret @@ -34,6 +36,7 @@ ; RV32-NEXT: lui a0, %hi(.Ltmp0) ; RV32-NEXT: addi a0, a0, %lo(.Ltmp0) ; RV32-NEXT: #NO_APP +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: constraint_S_label: @@ -44,6 +47,7 @@ ; RV64-NEXT: lui a0, %hi(.Ltmp0) ; RV64-NEXT: addi a0, a0, 
%lo(.Ltmp0) ; RV64-NEXT: #NO_APP +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret entry: br label %L1 diff --git a/llvm/test/CodeGen/RISCV/large-stack.ll b/llvm/test/CodeGen/RISCV/large-stack.ll --- a/llvm/test/CodeGen/RISCV/large-stack.ll +++ b/llvm/test/CodeGen/RISCV/large-stack.ll @@ -16,6 +16,7 @@ ; RV32I-FPELIM-NEXT: lui a0, 74565 ; RV32I-FPELIM-NEXT: addi a0, a0, 1664 ; RV32I-FPELIM-NEXT: add sp, sp, a0 +; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; RV32I-FPELIM-NEXT: ret ; ; RV32I-WITHFP-LABEL: test: @@ -35,8 +36,12 @@ ; RV32I-WITHFP-NEXT: addi a0, a0, -352 ; RV32I-WITHFP-NEXT: add sp, sp, a0 ; RV32I-WITHFP-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-WITHFP-NEXT: .cfi_def_cfa sp, 2032 ; RV32I-WITHFP-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-WITHFP-NEXT: .cfi_restore ra +; RV32I-WITHFP-NEXT: .cfi_restore s0 ; RV32I-WITHFP-NEXT: addi sp, sp, 2032 +; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV32I-WITHFP-NEXT: ret %tmp = alloca [ 305419896 x i8 ] , align 4 ret void @@ -74,9 +79,13 @@ ; RV32I-FPELIM-NEXT: lui a0, 97 ; RV32I-FPELIM-NEXT: addi a0, a0, 672 ; RV32I-FPELIM-NEXT: add sp, sp, a0 +; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset -2032 ; RV32I-FPELIM-NEXT: lw s1, 2024(sp) # 4-byte Folded Reload ; RV32I-FPELIM-NEXT: lw s0, 2028(sp) # 4-byte Folded Reload +; RV32I-FPELIM-NEXT: .cfi_restore s0 +; RV32I-FPELIM-NEXT: .cfi_restore s1 ; RV32I-FPELIM-NEXT: addi sp, sp, 2032 +; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; RV32I-FPELIM-NEXT: ret ; ; RV32I-WITHFP-LABEL: test_emergency_spill_slot: @@ -117,8 +126,14 @@ ; RV32I-WITHFP-NEXT: lw s2, 2016(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw s1, 2020(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-WITHFP-NEXT: .cfi_def_cfa sp, 2032 ; RV32I-WITHFP-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-WITHFP-NEXT: .cfi_restore ra +; RV32I-WITHFP-NEXT: .cfi_restore s0 +; RV32I-WITHFP-NEXT: .cfi_restore s1 +; RV32I-WITHFP-NEXT: .cfi_restore s2 ; RV32I-WITHFP-NEXT: addi sp, sp, 2032 +; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; RV32I-WITHFP-NEXT: ret %data = alloca [ 100000 x i32 ] , align 4 %ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %data, i32 0, i32 80000 diff --git a/llvm/test/CodeGen/RISCV/neg-abs.ll b/llvm/test/CodeGen/RISCV/neg-abs.ll --- a/llvm/test/CodeGen/RISCV/neg-abs.ll +++ b/llvm/test/CodeGen/RISCV/neg-abs.ll @@ -17,6 +17,7 @@ ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IBT-LABEL: neg_abs32: @@ -24,6 +25,7 @@ ; RV32IBT-NEXT: srai a1, a0, 31 ; RV32IBT-NEXT: xor a0, a0, a1 ; RV32IBT-NEXT: sub a0, a1, a0 +; RV32IBT-NEXT: .cfi_def_cfa_offset 0 ; RV32IBT-NEXT: ret ; ; RV64I-LABEL: neg_abs32: @@ -31,6 +33,7 @@ ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: subw a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IBT-LABEL: neg_abs32: @@ -38,6 +41,7 @@ ; RV64IBT-NEXT: sraiw a1, a0, 31 ; RV64IBT-NEXT: xor a0, a0, a1 ; RV64IBT-NEXT: subw a0, a1, a0 +; RV64IBT-NEXT: .cfi_def_cfa_offset 0 ; RV64IBT-NEXT: ret %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) %neg = sub nsw i32 0, %abs @@ -50,6 +54,7 @@ ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IBT-LABEL: select_neg_abs32: @@ -57,6 +62,7 @@ ; RV32IBT-NEXT: srai a1, a0, 31 ; RV32IBT-NEXT: xor a0, a0, a1 ; RV32IBT-NEXT: sub a0, a1, a0 +; 
RV32IBT-NEXT: .cfi_def_cfa_offset 0 ; RV32IBT-NEXT: ret ; ; RV64I-LABEL: select_neg_abs32: @@ -64,6 +70,7 @@ ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: subw a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IBT-LABEL: select_neg_abs32: @@ -71,6 +78,7 @@ ; RV64IBT-NEXT: sraiw a1, a0, 31 ; RV64IBT-NEXT: xor a0, a0, a1 ; RV64IBT-NEXT: subw a0, a1, a0 +; RV64IBT-NEXT: .cfi_def_cfa_offset 0 ; RV64IBT-NEXT: ret %1 = icmp slt i32 %x, 0 %2 = sub nsw i32 0, %x @@ -88,6 +96,7 @@ ; RV32I-NEXT: sub a1, a2, a1 ; RV32I-NEXT: sub a1, a1, a3 ; RV32I-NEXT: sub a0, a2, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IBT-LABEL: neg_abs64: @@ -99,6 +108,7 @@ ; RV32IBT-NEXT: sub a1, a2, a1 ; RV32IBT-NEXT: sub a1, a1, a3 ; RV32IBT-NEXT: sub a0, a2, a0 +; RV32IBT-NEXT: .cfi_def_cfa_offset 0 ; RV32IBT-NEXT: ret ; ; RV64I-LABEL: neg_abs64: @@ -106,6 +116,7 @@ ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: sub a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IBT-LABEL: neg_abs64: @@ -113,6 +124,7 @@ ; RV64IBT-NEXT: srai a1, a0, 63 ; RV64IBT-NEXT: xor a0, a0, a1 ; RV64IBT-NEXT: sub a0, a1, a0 +; RV64IBT-NEXT: .cfi_def_cfa_offset 0 ; RV64IBT-NEXT: ret %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true) %neg = sub nsw i64 0, %abs @@ -129,6 +141,7 @@ ; RV32I-NEXT: sub a1, a2, a1 ; RV32I-NEXT: sub a1, a1, a3 ; RV32I-NEXT: sub a0, a2, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32IBT-LABEL: select_neg_abs64: @@ -140,6 +153,7 @@ ; RV32IBT-NEXT: sub a1, a2, a1 ; RV32IBT-NEXT: sub a1, a1, a3 ; RV32IBT-NEXT: sub a0, a2, a0 +; RV32IBT-NEXT: .cfi_def_cfa_offset 0 ; RV32IBT-NEXT: ret ; ; RV64I-LABEL: select_neg_abs64: @@ -147,6 +161,7 @@ ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 ; RV64I-NEXT: sub a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64IBT-LABEL: select_neg_abs64: @@ -154,6 +169,7 @@ ; RV64IBT-NEXT: srai a1, a0, 63 ; RV64IBT-NEXT: xor a0, a0, a1 ; RV64IBT-NEXT: sub a0, a1, a0 +; RV64IBT-NEXT: .cfi_def_cfa_offset 0 ; RV64IBT-NEXT: ret %1 = icmp slt i64 %x, 0 %2 = sub nsw i64 0, %x diff --git a/llvm/test/CodeGen/RISCV/patchable-function-entry.ll b/llvm/test/CodeGen/RISCV/patchable-function-entry.ll --- a/llvm/test/CodeGen/RISCV/patchable-function-entry.ll +++ b/llvm/test/CodeGen/RISCV/patchable-function-entry.ll @@ -18,8 +18,10 @@ ; CHECK-LABEL: f1: ; CHECK-NEXT: .Lfunc_begin1: ; NORVC: addi zero, zero, 0 +; NORVC-NEXT: .cfi_def_cfa_offset 0 ; NORVC-NEXT: jalr zero, 0(ra) ; RVC: c.nop +; RVC-NEXT: .cfi_def_cfa_offset 0 ; RVC-NEXT: c.jr ra ; CHECK: .section __patchable_function_entries,"awo",@progbits,f1{{$}} ; 32: .p2align 2 @@ -34,8 +36,10 @@ ; CHECK-LABEL: f5: ; CHECK-NEXT: .Lfunc_begin2: ; NORVC-COUNT-5: addi zero, zero, 0 +; NORVC-NEXT: .cfi_def_cfa_offset 0 ; NORVC-NEXT: jalr zero, 0(ra) ; RVC-COUNT-5: c.nop +; RVC-NEXT: .cfi_def_cfa_offset 0 ; RVC-NEXT: c.jr ra ; CHECK: .section __patchable_function_entries,"aGwo",@progbits,f5,comdat,f5{{$}} ; RV32: .p2align 2 diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll --- a/llvm/test/CodeGen/RISCV/rv32zba.ll +++ b/llvm/test/CodeGen/RISCV/rv32zba.ll @@ -10,12 +10,14 @@ ; RV32I-NEXT: slli a0, a0, 1 ; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: lh a0, 0(a0) +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: sh1add: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a0, a2 ; RV32ZBA-NEXT: lh a0, 0(a0) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; 
RV32ZBA-NEXT: ret %3 = getelementptr inbounds i16, i16* %1, i64 %0 %4 = load i16, i16* %3 @@ -28,12 +30,14 @@ ; RV32I-NEXT: slli a0, a0, 2 ; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: lw a0, 0(a0) +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: sh2add: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a0, a2 ; RV32ZBA-NEXT: lw a0, 0(a0) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %3 = getelementptr inbounds i32, i32* %1, i64 %0 %4 = load i32, i32* %3 @@ -47,6 +51,7 @@ ; RV32I-NEXT: add a1, a2, a0 ; RV32I-NEXT: lw a0, 0(a1) ; RV32I-NEXT: lw a1, 4(a1) +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: sh3add: @@ -54,6 +59,7 @@ ; RV32ZBA-NEXT: sh3add a1, a0, a2 ; RV32ZBA-NEXT: lw a0, 0(a1) ; RV32ZBA-NEXT: lw a1, 4(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %3 = getelementptr inbounds i64, i64* %1, i64 %0 %4 = load i64, i64* %3 @@ -66,12 +72,14 @@ ; RV32I-NEXT: addi a2, zero, 6 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul6: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a0, a0 ; RV32ZBA-NEXT: sh1add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 6 %d = add i32 %c, %b @@ -84,12 +92,14 @@ ; RV32I-NEXT: addi a2, zero, 10 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul10: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a0, a0 ; RV32ZBA-NEXT: sh1add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 10 %d = add i32 %c, %b @@ -102,12 +112,14 @@ ; RV32I-NEXT: addi a2, zero, 12 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul12: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 12 %d = add i32 %c, %b @@ -120,12 +132,14 @@ ; RV32I-NEXT: addi a2, zero, 18 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul18: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh1add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 18 %d = add i32 %c, %b @@ -138,12 +152,14 @@ ; RV32I-NEXT: addi a2, zero, 20 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul20: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 20 %d = add i32 %c, %b @@ -156,12 +172,14 @@ ; RV32I-NEXT: addi a2, zero, 24 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul24: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 24 %d = add i32 %c, %b @@ -174,12 +192,14 @@ ; RV32I-NEXT: addi a2, zero, 36 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul36: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = 
mul i32 %a, 36 %d = add i32 %c, %b @@ -192,12 +212,14 @@ ; RV32I-NEXT: addi a2, zero, 40 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul40: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 40 %d = add i32 %c, %b @@ -210,12 +232,14 @@ ; RV32I-NEXT: addi a2, zero, 72 ; RV32I-NEXT: mul a0, a0, a2 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addmul72: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 72 %d = add i32 %c, %b @@ -227,12 +251,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 96 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul96: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a0, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 96 ret i32 %c @@ -243,12 +269,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 160 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul160: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a0, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 160 ret i32 %c @@ -259,12 +287,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 288 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul288: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 288 ret i32 %c @@ -275,12 +305,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 258 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul258: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: addi a1, zero, 258 ; RV32ZBA-NEXT: mul a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 258 ret i32 %c @@ -291,12 +323,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 260 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul260: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: addi a1, zero, 260 ; RV32ZBA-NEXT: mul a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 260 ret i32 %c @@ -307,12 +341,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 264 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul264: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: addi a1, zero, 264 ; RV32ZBA-NEXT: mul a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 264 ret i32 %c @@ -323,12 +359,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 11 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul11: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a1, a0, a0 ; RV32ZBA-NEXT: sh1add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 11 ret i32 %c @@ -339,12 +377,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 19 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul19: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a1, a0, 
a0 ; RV32ZBA-NEXT: sh1add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 19 ret i32 %c @@ -355,12 +395,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 13 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul13: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a1, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 13 ret i32 %c @@ -371,12 +413,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 21 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul21: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a1, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 21 ret i32 %c @@ -387,12 +431,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 37 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul37: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a1, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 37 ret i32 %c @@ -403,12 +449,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 25 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul25: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a1, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 25 ret i32 %c @@ -419,12 +467,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 41 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul41: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a1, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 41 ret i32 %c @@ -435,12 +485,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 73 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul73: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a1, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 73 ret i32 %c @@ -451,12 +503,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 27 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul27: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh1add a0, a0, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 27 ret i32 %c @@ -467,12 +521,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 45 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul45: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh2add a0, a0, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 45 ret i32 %c @@ -483,12 +539,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: addi a1, zero, 81 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul81: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a0, a0 ; RV32ZBA-NEXT: sh3add a0, a0, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 81 ret i32 %c @@ -500,12 +558,14 @@ ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, 2 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul4098: 
; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: slli a1, a0, 12 ; RV32ZBA-NEXT: sh1add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 4098 ret i32 %c @@ -517,12 +577,14 @@ ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, 4 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul4100: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: slli a1, a0, 12 ; RV32ZBA-NEXT: sh2add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 4100 ret i32 %c @@ -534,12 +596,14 @@ ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, 8 ; RV32I-NEXT: mul a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: mul4104: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: slli a1, a0, 12 ; RV32ZBA-NEXT: sh3add a0, a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = mul i32 %a, 4104 ret i32 %c @@ -551,12 +615,14 @@ ; RV32I-NEXT: lui a1, 1 ; RV32I-NEXT: addi a1, a1, 8 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: add4104: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: addi a1, zero, 1026 ; RV32ZBA-NEXT: sh2add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = add i32 %a, 4104 ret i32 %c @@ -568,12 +634,14 @@ ; RV32I-NEXT: lui a1, 2 ; RV32I-NEXT: addi a1, a1, 16 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: add8208: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: addi a1, zero, 1026 ; RV32ZBA-NEXT: sh3add a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = add i32 %a, 8208 ret i32 %c @@ -585,12 +653,14 @@ ; RV32I-NEXT: slli a0, a0, 5 ; RV32I-NEXT: slli a1, a1, 6 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addshl_5_6: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh1add a0, a1, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 6 @@ -604,12 +674,14 @@ ; RV32I-NEXT: slli a0, a0, 5 ; RV32I-NEXT: slli a1, a1, 7 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addshl_5_7: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh2add a0, a1, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 7 @@ -623,12 +695,14 @@ ; RV32I-NEXT: slli a0, a0, 5 ; RV32I-NEXT: slli a1, a1, 8 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBA-LABEL: addshl_5_8: ; RV32ZBA: # %bb.0: ; RV32ZBA-NEXT: sh3add a0, a1, a0 ; RV32ZBA-NEXT: slli a0, a0, 5 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 8 diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll --- a/llvm/test/CodeGen/RISCV/rv32zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll @@ -734,12 +734,14 @@ ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: xor a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBB-LABEL: abs_i32: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: neg a1, a0 ; RV32ZBB-NEXT: max a0, a0, a1 +; RV32ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBB-NEXT: ret %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) ret i32 %abs @@ -757,6 +759,7 @@ ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 ; RV32I-NEXT: .LBB19_2: +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBB-LABEL: abs_i64: @@ -768,6 +771,7 @@ ; 
RV32ZBB-NEXT: add a1, a1, a2 ; RV32ZBB-NEXT: neg a1, a1 ; RV32ZBB-NEXT: .LBB19_2: +; RV32ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBB-NEXT: ret %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true) ret i64 %abs @@ -861,6 +865,7 @@ ; RV32I-NEXT: or a0, a0, a3 ; RV32I-NEXT: or a1, a0, a1 ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBB-LABEL: bswap_i64: @@ -868,6 +873,7 @@ ; RV32ZBB-NEXT: rev8 a2, a1 ; RV32ZBB-NEXT: rev8 a1, a0 ; RV32ZBB-NEXT: mv a0, a2 +; RV32ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBB-NEXT: ret %1 = call i64 @llvm.bswap.i64(i64 %a) ret i64 %1 diff --git a/llvm/test/CodeGen/RISCV/rv32zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbp.ll --- a/llvm/test/CodeGen/RISCV/rv32zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbp.ll @@ -2141,6 +2141,7 @@ ; RV32I-NEXT: or a0, a0, a3 ; RV32I-NEXT: or a1, a0, a1 ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBP-LABEL: bswap_i64: @@ -2148,6 +2149,7 @@ ; RV32ZBP-NEXT: rev8 a2, a1 ; RV32ZBP-NEXT: rev8 a1, a0 ; RV32ZBP-NEXT: mv a0, a2 +; RV32ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBP-NEXT: ret %1 = call i64 @llvm.bswap.i64(i64 %a) ret i64 %1 @@ -2362,11 +2364,13 @@ ; RV32I-NEXT: slli a1, a0, 16 ; RV32I-NEXT: srli a0, a0, 16 ; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBP-LABEL: bswap_rotr_i32: ; RV32ZBP: # %bb.0: ; RV32ZBP-NEXT: rev8.h a0, a0 +; RV32ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBP-NEXT: ret %1 = call i32 @llvm.bswap.i32(i32 %a) %2 = call i32 @llvm.fshr.i32(i32 %1, i32 %1, i32 16) @@ -2391,11 +2395,13 @@ ; RV32I-NEXT: srli a1, a0, 16 ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBP-LABEL: bswap_rotl_i32: ; RV32ZBP: # %bb.0: ; RV32ZBP-NEXT: rev8.h a0, a0 +; RV32ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBP-NEXT: ret %1 = call i32 @llvm.bswap.i32(i32 %a) %2 = call i32 @llvm.fshl.i32(i32 %1, i32 %1, i32 16) @@ -2447,11 +2453,13 @@ ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: or a0, a0, a2 ; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBP-LABEL: bitreverse_bswap_i32: ; RV32ZBP: # %bb.0: ; RV32ZBP-NEXT: rev.b a0, a0 +; RV32ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBP-NEXT: ret %1 = call i32 @llvm.bitreverse.i32(i32 %a) %2 = call i32 @llvm.bswap.i32(i32 %1) @@ -2536,12 +2544,14 @@ ; RV32I-NEXT: slli a1, a1, 24 ; RV32I-NEXT: or a1, a1, a3 ; RV32I-NEXT: or a1, a1, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBP-LABEL: bitreverse_bswap_i64: ; RV32ZBP: # %bb.0: ; RV32ZBP-NEXT: rev.b a0, a0 ; RV32ZBP-NEXT: rev.b a1, a1 +; RV32ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBP-NEXT: ret %1 = call i64 @llvm.bitreverse.i64(i64 %a) %2 = call i64 @llvm.bswap.i64(i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll --- a/llvm/test/CodeGen/RISCV/rv32zbs.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll @@ -459,12 +459,14 @@ ; RV32I-NEXT: lui a1, 524288 ; RV32I-NEXT: addi a1, a1, -5 ; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBS-LABEL: sbclri_i32_large2: ; RV32ZBS: # %bb.0: ; RV32ZBS-NEXT: bclri a0, a0, 2 ; RV32ZBS-NEXT: bclri a0, a0, 31 +; RV32ZBS-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBS-NEXT: ret %2 = and i32 %0, 2147483643 ret i32 %2 @@ -476,12 +478,14 @@ ; RV32I-NEXT: lui a1, 524288 ; RV32I-NEXT: addi a1, a1, -6 ; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV32ZBS-LABEL: sbclri_i32_large3: ; 
RV32ZBS: # %bb.0: ; RV32ZBS-NEXT: andi a0, a0, -6 ; RV32ZBS-NEXT: bclri a0, a0, 31 +; RV32ZBS-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBS-NEXT: ret %2 = and i32 %0, 2147483642 ret i32 %2 diff --git a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll --- a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll @@ -15,6 +15,7 @@ ; CHECK-NEXT: addw a0, a0, a2 ; CHECK-NEXT: addiw a0, a0, 1 ; CHECK-NEXT: sllw a0, a0, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = mul i32 %x, %x %c = add i32 %b, 1 diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll --- a/llvm/test/CodeGen/RISCV/rv64zba.ll +++ b/llvm/test/CodeGen/RISCV/rv64zba.ll @@ -28,6 +28,7 @@ ; RV64I-NEXT: add a1, a1, a0 ; RV64I-NEXT: ld a0, 0(a1) ; RV64I-NEXT: ld a1, 8(a1) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: slliuw_2: @@ -36,6 +37,7 @@ ; RV64ZBA-NEXT: add a1, a1, a0 ; RV64ZBA-NEXT: ld a0, 0(a1) ; RV64ZBA-NEXT: ld a1, 8(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = zext i32 %0 to i64 %4 = getelementptr inbounds i128, i128* %1, i64 %3 @@ -67,12 +69,14 @@ ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: lb a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: adduw_2: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: add.uw a0, a0, a1 ; RV64ZBA-NEXT: lb a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = zext i32 %0 to i64 %4 = getelementptr inbounds i8, i8* %1, i64 %3 @@ -103,12 +107,14 @@ ; RV64I-NEXT: ori a0, a0, 1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: zextw_demandedbits_i64: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: ori a0, a0, 1 ; RV64ZBA-NEXT: zext.w a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %2 = and i64 %0, 4294967294 %3 = or i64 %2, 1 @@ -121,12 +127,14 @@ ; RV64I-NEXT: slli a0, a0, 1 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: lh a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh1add: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a1 ; RV64ZBA-NEXT: lh a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = getelementptr inbounds i16, i16* %1, i64 %0 %4 = load i16, i16* %3 @@ -139,12 +147,14 @@ ; RV64I-NEXT: slli a0, a0, 2 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: lw a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2add: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a1 ; RV64ZBA-NEXT: lw a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = getelementptr inbounds i32, i32* %1, i64 %0 %4 = load i32, i32* %3 @@ -157,12 +167,14 @@ ; RV64I-NEXT: slli a0, a0, 3 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: ld a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh3add: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a1 ; RV64ZBA-NEXT: ld a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = getelementptr inbounds i64, i64* %1, i64 %0 %4 = load i64, i64* %3 @@ -176,12 +188,14 @@ ; RV64I-NEXT: srli a0, a0, 31 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: lh a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh1adduw: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add.uw a0, a0, a1 ; RV64ZBA-NEXT: lh a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = zext i32 %0 to 
i64 %4 = getelementptr inbounds i16, i16* %1, i64 %3 @@ -195,11 +209,13 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 31 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh1adduw_2: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add.uw a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = shl i64 %0, 1 %4 = and i64 %3, 8589934590 @@ -214,12 +230,14 @@ ; RV64I-NEXT: srli a0, a0, 30 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: lw a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2adduw: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add.uw a0, a0, a1 ; RV64ZBA-NEXT: lw a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = zext i32 %0 to i64 %4 = getelementptr inbounds i32, i32* %1, i64 %3 @@ -233,11 +251,13 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 30 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2adduw_2: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add.uw a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = shl i64 %0, 2 %4 = and i64 %3, 17179869180 @@ -252,12 +272,14 @@ ; RV64I-NEXT: srli a0, a0, 29 ; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: ld a0, 0(a0) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh3adduw: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add.uw a0, a0, a1 ; RV64ZBA-NEXT: ld a0, 0(a0) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = zext i32 %0 to i64 %4 = getelementptr inbounds i64, i64* %1, i64 %3 @@ -271,11 +293,13 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 29 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh3adduw_2: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add.uw a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %3 = shl i64 %0, 3 %4 = and i64 %3, 34359738360 @@ -296,6 +320,7 @@ ; RV64I-NEXT: sllw a1, a2, a0 ; RV64I-NEXT: sraiw a0, a0, 2 ; RV64I-NEXT: mul a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2add_extra_sext: @@ -304,6 +329,7 @@ ; RV64ZBA-NEXT: sllw a1, a2, a0 ; RV64ZBA-NEXT: sraiw a0, a0, 2 ; RV64ZBA-NEXT: mul a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = shl i32 %x, 2 %b = add i32 %a, %y @@ -321,12 +347,14 @@ ; RV64I-NEXT: addi a2, zero, 6 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul6: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 6 %d = add i64 %c, %b @@ -339,12 +367,14 @@ ; RV64I-NEXT: addi a2, zero, 10 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul10: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 10 %d = add i64 %c, %b @@ -357,12 +387,14 @@ ; RV64I-NEXT: addi a2, zero, 12 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul12: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 12 %d = add i64 %c, %b @@ -375,12 +407,14 @@ ; RV64I-NEXT: 
addi a2, zero, 18 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul18: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 18 %d = add i64 %c, %b @@ -393,12 +427,14 @@ ; RV64I-NEXT: addi a2, zero, 20 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul20: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 20 %d = add i64 %c, %b @@ -411,12 +447,14 @@ ; RV64I-NEXT: addi a2, zero, 24 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul24: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 24 %d = add i64 %c, %b @@ -429,12 +467,14 @@ ; RV64I-NEXT: addi a2, zero, 36 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul36: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 36 %d = add i64 %c, %b @@ -447,12 +487,14 @@ ; RV64I-NEXT: addi a2, zero, 40 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul40: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 40 %d = add i64 %c, %b @@ -465,12 +507,14 @@ ; RV64I-NEXT: addi a2, zero, 72 ; RV64I-NEXT: mul a0, a0, a2 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addmul72: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 72 %d = add i64 %c, %b @@ -482,12 +526,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 96 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul96: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a0 ; RV64ZBA-NEXT: slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 96 ret i64 %c @@ -498,12 +544,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 160 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul160: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a0 ; RV64ZBA-NEXT: slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 160 ret i64 %c @@ -514,12 +562,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 288 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul288: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 288 ret i64 %c @@ -530,12 +580,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 1 ; RV64I-NEXT: addi a0, a0, 5 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh1add_imm: ; RV64ZBA: # %bb.0: ; 
RV64ZBA-NEXT: slli a0, a0, 1 ; RV64ZBA-NEXT: addi a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = shl i64 %0, 1 %b = add i64 %a, 5 @@ -547,12 +599,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 2 ; RV64I-NEXT: addi a0, a0, -6 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2add_imm: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli a0, a0, 2 ; RV64ZBA-NEXT: addi a0, a0, -6 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = shl i64 %0, 2 %b = add i64 %a, -6 @@ -564,12 +618,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 3 ; RV64I-NEXT: ori a0, a0, 7 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh3add_imm: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli a0, a0, 3 ; RV64ZBA-NEXT: ori a0, a0, 7 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = shl i64 %0, 3 %b = add i64 %a, 7 @@ -582,12 +638,14 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 31 ; RV64I-NEXT: addi a0, a0, 11 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh1adduw_imm: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli.uw a0, a0, 1 ; RV64ZBA-NEXT: addi a0, a0, 11 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = zext i32 %0 to i64 %b = shl i64 %a, 1 @@ -601,12 +659,14 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 30 ; RV64I-NEXT: addi a0, a0, -12 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh2adduw_imm: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli.uw a0, a0, 2 ; RV64ZBA-NEXT: addi a0, a0, -12 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = zext i32 %0 to i64 %b = shl i64 %a, 2 @@ -620,12 +680,14 @@ ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 29 ; RV64I-NEXT: addi a0, a0, 13 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: sh3adduw_imm: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli.uw a0, a0, 3 ; RV64ZBA-NEXT: addi a0, a0, 13 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %a = zext i32 %0 to i64 %b = shl i64 %a, 3 @@ -656,12 +718,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 258 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul258: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: addi a1, zero, 258 ; RV64ZBA-NEXT: mul a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 258 ret i64 %c @@ -672,12 +736,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 260 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul260: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: addi a1, zero, 260 ; RV64ZBA-NEXT: mul a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 260 ret i64 %c @@ -688,12 +754,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 264 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul264: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: addi a1, zero, 264 ; RV64ZBA-NEXT: mul a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 264 ret i64 %c @@ -738,12 +806,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 11 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul11: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a1, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 11 ret i64 %c @@ -754,12 +824,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi 
a1, zero, 19 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul19: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a1, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 19 ret i64 %c @@ -770,12 +842,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 13 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul13: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a1, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 13 ret i64 %c @@ -786,12 +860,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 21 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul21: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a1, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 21 ret i64 %c @@ -802,12 +878,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 37 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul37: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a1, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 37 ret i64 %c @@ -818,12 +896,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 25 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul25: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a1, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 25 ret i64 %c @@ -834,12 +914,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 41 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul41: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a1, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 41 ret i64 %c @@ -850,12 +932,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 73 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul73: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a1, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 73 ret i64 %c @@ -866,12 +950,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 27 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul27: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh1add a0, a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 27 ret i64 %c @@ -882,12 +968,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 45 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul45: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh2add a0, a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 45 ret i64 %c @@ -898,12 +986,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 81 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul81: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: sh3add a0, a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 81 ret i64 %c @@ -915,12 
+1005,14 @@ ; RV64I-NEXT: lui a1, 1 ; RV64I-NEXT: addiw a1, a1, 2 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul4098: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli a1, a0, 12 ; RV64ZBA-NEXT: sh1add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 4098 ret i64 %c @@ -932,12 +1024,14 @@ ; RV64I-NEXT: lui a1, 1 ; RV64I-NEXT: addiw a1, a1, 4 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul4100: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli a1, a0, 12 ; RV64ZBA-NEXT: sh2add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 4100 ret i64 %c @@ -949,12 +1043,14 @@ ; RV64I-NEXT: lui a1, 1 ; RV64I-NEXT: addiw a1, a1, 8 ; RV64I-NEXT: mul a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mul4104: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: slli a1, a0, 12 ; RV64ZBA-NEXT: sh3add a0, a0, a1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i64 %a, 4104 ret i64 %c @@ -965,12 +1061,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 192 ; RV64I-NEXT: mulw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mulw192: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a0, a0 ; RV64ZBA-NEXT: slliw a0, a0, 6 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i32 %a, 192 ret i32 %c @@ -981,12 +1079,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 320 ; RV64I-NEXT: mulw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mulw320: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a0, a0 ; RV64ZBA-NEXT: slliw a0, a0, 6 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i32 %a, 320 ret i32 %c @@ -997,12 +1097,14 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, 576 ; RV64I-NEXT: mulw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: mulw576: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a0, a0 ; RV64ZBA-NEXT: slliw a0, a0, 6 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = mul i32 %a, 576 ret i32 %c @@ -1014,12 +1116,14 @@ ; RV64I-NEXT: lui a1, 1 ; RV64I-NEXT: addiw a1, a1, 8 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: add4104: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: addi a1, zero, 1026 ; RV64ZBA-NEXT: sh2add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = add i64 %a, 4104 ret i64 %c @@ -1031,12 +1135,14 @@ ; RV64I-NEXT: lui a1, 2 ; RV64I-NEXT: addiw a1, a1, 16 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: add8208: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: addi a1, zero, 1026 ; RV64ZBA-NEXT: sh3add a0, a1, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = add i64 %a, 8208 ret i64 %c @@ -1048,12 +1154,14 @@ ; RV64I-NEXT: slliw a0, a0, 5 ; RV64I-NEXT: slliw a1, a1, 6 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl32_5_6: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add a0, a1, a0 ; RV64ZBA-NEXT: slliw a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 6 @@ -1067,12 +1175,14 @@ ; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: slli a1, a1, 6 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl64_5_6: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh1add 
a0, a1, a0 ; RV64ZBA-NEXT: slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i64 %a, 5 %d = shl i64 %b, 6 @@ -1086,12 +1196,14 @@ ; RV64I-NEXT: slliw a0, a0, 5 ; RV64I-NEXT: slliw a1, a1, 7 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl32_5_7: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a1, a0 ; RV64ZBA-NEXT: slliw a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 7 @@ -1105,12 +1217,14 @@ ; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: slli a1, a1, 7 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl64_5_7: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh2add a0, a1, a0 ; RV64ZBA-NEXT: slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i64 %a, 5 %d = shl i64 %b, 7 @@ -1124,12 +1238,14 @@ ; RV64I-NEXT: slliw a0, a0, 5 ; RV64I-NEXT: slliw a1, a1, 8 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl32_5_8: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a1, a0 ; RV64ZBA-NEXT: slliw a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i32 %a, 5 %d = shl i32 %b, 8 @@ -1143,12 +1259,14 @@ ; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: slli a1, a1, 8 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: addshl64_5_8: ; RV64ZBA: # %bb.0: ; RV64ZBA-NEXT: sh3add a0, a1, a0 ; RV64ZBA-NEXT: slli a0, a0, 5 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %c = shl i64 %a, 5 %d = shl i64 %b, 8 diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -290,7 +290,9 @@ ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: srliw a0, a0, 24 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB4_2: ; RV64I-NEXT: addi a0, zero, 32 @@ -300,6 +302,7 @@ ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: srliw a0, a0, 1 ; RV64ZBB-NEXT: clzw a0, a0 +; RV64ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBB-NEXT: ret %1 = lshr i32 %a, 1 %2 = call i32 @llvm.ctlz.i32(i32 %1, i1 false) @@ -1011,6 +1014,7 @@ ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: abs_i32: @@ -1018,6 +1022,7 @@ ; RV64ZBB-NEXT: sext.w a0, a0 ; RV64ZBB-NEXT: neg a1, a0 ; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBB-NEXT: ret %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) ret i32 %abs @@ -1031,12 +1036,14 @@ ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: abs_i64: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: neg a1, a0 ; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBB-NEXT: ret %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true) ret i64 %abs @@ -1165,11 +1172,13 @@ ; RV64I-NEXT: or a0, a0, a3 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: bswap_i64: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: rev8 a0, a0 +; RV64ZBB-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBB-NEXT: ret %1 = call i64 @llvm.bswap.i64(i64 %a) ret i64 %1 diff --git 
a/llvm/test/CodeGen/RISCV/rv64zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbp.ll --- a/llvm/test/CodeGen/RISCV/rv64zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbp.ll @@ -2469,11 +2469,13 @@ ; RV64I-NEXT: or a0, a0, a3 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: bswap_i64: ; RV64ZBP: # %bb.0: ; RV64ZBP-NEXT: rev8 a0, a0 +; RV64ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBP-NEXT: ret %1 = call i64 @llvm.bswap.i64(i64 %a) ret i64 %1 @@ -2742,11 +2744,13 @@ ; RV64I-NEXT: slliw a0, a0, 16 ; RV64I-NEXT: srliw a1, a1, 16 ; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: bswap_rotr_i32: ; RV64ZBP: # %bb.0: ; RV64ZBP-NEXT: greviw a0, a0, 8 +; RV64ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBP-NEXT: ret %1 = call i32 @llvm.bswap.i32(i32 %a) %2 = call i32 @llvm.fshr.i32(i32 %1, i32 %1, i32 16) @@ -2768,11 +2772,13 @@ ; RV64I-NEXT: srliw a0, a0, 16 ; RV64I-NEXT: slliw a1, a1, 16 ; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: bswap_rotl_i32: ; RV64ZBP: # %bb.0: ; RV64ZBP-NEXT: greviw a0, a0, 8 +; RV64ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBP-NEXT: ret %1 = call i32 @llvm.bswap.i32(i32 %a) %2 = call i32 @llvm.fshl.i32(i32 %1, i32 %1, i32 16) @@ -2824,11 +2830,13 @@ ; RV64I-NEXT: slliw a0, a0, 24 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: bitreverse_bswap_i32: ; RV64ZBP: # %bb.0: ; RV64ZBP-NEXT: greviw a0, a0, 7 +; RV64ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBP-NEXT: ret %1 = call i32 @llvm.bitreverse.i32(i32 %a) %2 = call i32 @llvm.bswap.i32(i32 %1) @@ -2926,11 +2934,13 @@ ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: or a0, a0, a3 ; RV64I-NEXT: or a0, a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret ; ; RV64ZBP-LABEL: bitreverse_bswap_i64: ; RV64ZBP: # %bb.0: ; RV64ZBP-NEXT: rev.b a0, a0 +; RV64ZBP-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBP-NEXT: ret %1 = call i64 @llvm.bitreverse.i64(i64 %a) %2 = call i64 @llvm.bswap.i64(i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll @@ -10,6 +10,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv1i16( %v, i1 false) ret %r @@ -23,6 +24,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv2i16( %v, i1 false) ret %r @@ -36,6 +38,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv4i16( %v, i1 false) ret %r @@ -49,6 +52,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vi v10, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv8i16( %v, i1 false) ret %r @@ -62,6 +66,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vrsub.vi v12, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv16i16( %v, i1 false) ret %r @@ -75,6 +80,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, 
m8, ta, mu ; CHECK-NEXT: vrsub.vi v16, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv32i16( %v, i1 false) ret %r @@ -88,6 +94,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv1i32( %v, i1 false) ret %r @@ -101,6 +108,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv2i32( %v, i1 false) ret %r @@ -114,6 +122,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vi v10, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv4i32( %v, i1 false) ret %r @@ -127,6 +136,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vi v12, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv8i32( %v, i1 false) ret %r @@ -140,6 +150,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vrsub.vi v16, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv16i32( %v, i1 false) ret %r @@ -153,6 +164,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv1i64( %v, i1 false) ret %r @@ -166,6 +178,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vrsub.vi v10, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv2i64( %v, i1 false) ret %r @@ -179,6 +192,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vrsub.vi v12, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv4i64( %v, i1 false) ret %r @@ -192,6 +206,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vrsub.vi v16, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.abs.nxv8i64( %v, i1 false) ret %r diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll --- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll @@ -12,6 +12,7 @@ ; RV64IV-NEXT: ld a1, 520(sp) ; RV64IV-NEXT: sd a1, 0(a0) ; RV64IV-NEXT: addi sp, sp, 528 +; RV64IV-NEXT: .cfi_def_cfa_offset 0 ; RV64IV-NEXT: ret %local = alloca i64 %array = alloca [64 x i64] @@ -44,6 +45,7 @@ ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: add sp, sp, a0 ; RV64IV-NEXT: addi sp, sp, 544 +; RV64IV-NEXT: .cfi_def_cfa_offset 0 ; RV64IV-NEXT: ret %local = alloca i64 %vector = alloca diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir --- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir +++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir @@ -51,8 +51,12 @@ ; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10 ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 240 ; CHECK-NEXT: $x8 = LD $x2, 2016 :: (load (s64) from %stack.4) + ; CHECK-NEXT: CFI_INSTRUCTION def_cfa $x2, 2032 ; CHECK-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from 
%stack.3) + ; CHECK-NEXT: CFI_INSTRUCTION restore $x1 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x8 ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 2032 + ; CHECK-NEXT: CFI_INSTRUCTION def_cfa_offset 0 ; CHECK-NEXT: PseudoRET %1:gprnox0 = COPY $x11 %0:gpr = COPY $x10 diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %p ret %v @@ -18,6 +19,7 @@ ; CHECK-LABEL: ret_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %p ret %v @@ -27,6 +29,7 @@ ; CHECK-LABEL: ret_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %p ret %v @@ -40,6 +43,7 @@ ; CHECK-NEXT: add a1, a0, a1 ; CHECK-NEXT: vl8re64.v v16, (a1) ; CHECK-NEXT: vl8re64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %p ret %v @@ -50,6 +54,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %p ret %v @@ -60,6 +65,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %p ret %v @@ -88,6 +94,7 @@ ; CHECK-NEXT: vs8r.v v0, (a1) ; CHECK-NEXT: add a0, a0, a3 ; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %x ret %v @@ -184,6 +191,7 @@ ; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %x ret %v @@ -194,6 +202,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = add %v, %w ret %r @@ -204,6 +213,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = add %v, %w ret %r @@ -214,6 +224,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = xor %v, %w ret %r @@ -224,6 +235,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = and %v, %w ret %r @@ -271,6 +283,7 @@ ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = add %x, %y %s = add %r, %z @@ -312,7 +325,9 @@ ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add sp, sp, a0 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 32 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ret_nxv32i32_call_nxv32i32_nxv32i32_i32: @@ -343,7 +358,9 @@ ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra ; RV64-NEXT: addi sp, sp, 32 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %t = call fastcc @ext2( %y, %x, i32 %w, i32 2) ret %t 
@@ -419,7 +436,9 @@ ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 32 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_i32: @@ -491,7 +510,9 @@ ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra ; RV64-NEXT: addi sp, sp, 32 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %t = call fastcc @ext3( %z, %y, %x, i32 %w, i32 42) ret %t @@ -511,6 +532,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v24 ; CHECK-NEXT: vadd.vv v16, v16, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = add %x, %z ret %s @@ -567,7 +589,9 @@ ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: add sp, sp, a0 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 32 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: pass_vector_arg_indirect_stack: @@ -619,7 +643,9 @@ ; RV64-NEXT: slli a0, a0, 5 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra ; RV64-NEXT: addi sp, sp, 32 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = call fastcc @vector_arg_indirect_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, zeroinitializer, zeroinitializer, zeroinitializer, i32 8) ret %s diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll @@ -14,6 +14,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v24 ; RV32-NEXT: vadd.vv v16, v16, v0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: callee_scalable_vector_split_indirect: @@ -26,6 +27,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; RV64-NEXT: vadd.vv v8, v8, v24 ; RV64-NEXT: vadd.vv v16, v16, v0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = add %x, %y ret %a @@ -58,7 +60,9 @@ ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add sp, sp, a0 ; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 48 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: caller_scalable_vector_split_indirect: @@ -86,7 +90,9 @@ ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra ; RV64-NEXT: addi sp, sp, 32 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %c = alloca i64 %a = call @callee_scalable_vector_split_indirect( zeroinitializer, %x) diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll --- a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: addi a0, zero, 7 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v1 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %a0, <2 x i64> ) %v2 = add <2 x i64> %v1, @@ -24,6 +25,7 @@ ; CHECK-NEXT: addi a0, zero, 7 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ins1 = insertelement poison, i64 7, i32 0 %splat1 = shufflevector %ins1, poison, zeroinitializer @@ -42,6 +44,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v1 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %a0, <2 x i64> %a1) %v2 = sub <2 x i64> %v1, %a1 @@ -53,6 +56,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v1 = call @llvm.umax.nxv2i64( %a0, %a1) %v2 = sub %v1, %a1 @@ -64,6 +68,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v1 = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %a0, <2 x i64> %a1) %v2 = sub <2 x i64> %a0, %v1 @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v1 = call @llvm.umin.nxv2i64( %a0, %a1) %v2 = sub %a0, %v1 @@ -90,6 +96,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp uge <2 x i64> %a0, %a1 %v1 = sub <2 x i64> %a0, %a1 @@ -102,6 +109,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp uge %a0, %a1 %v1 = sub %a0, %a1 @@ -148,6 +156,7 @@ ; CHECK-NEXT: addi a0, zero, 6 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v1 = add <2 x i64> %a0, %cmp = icmp ugt <2 x i64> %a0, @@ -161,6 +170,7 @@ ; CHECK-NEXT: addi a0, zero, 6 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cm1 = insertelement poison, i64 -6, i32 0 %splatcm1 = shufflevector %cm1, poison, zeroinitializer @@ -179,6 +189,7 @@ ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vssubu.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vselect_add_const_signbit_v2i16: @@ -187,6 +198,7 @@ ; RV64-NEXT: addiw a0, a0, -1 ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vssubu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %cmp = icmp ugt <2 x i16> %a0, %v1 = add <2 x i16> %a0, @@ -201,6 +213,7 @@ ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; RV32-NEXT: vssubu.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vselect_add_const_signbit_nxv2i16: @@ -209,6 +222,7 @@ ; RV64-NEXT: addiw a0, a0, -1 ; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; RV64-NEXT: vssubu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %cm1 = insertelement poison, i16 32766, i32 0 %splatcm1 = shufflevector %cm1, poison, zeroinitializer @@ -228,6 +242,7 @@ ; CHECK-NEXT: lui a0, 8 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt <2 x i16> %a0, zeroinitializer %v1 = xor <2 x i16> %a0, @@ -241,6 +256,7 @@ ; CHECK-NEXT: lui a0, 8 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %a0, zeroinitializer %ins = insertelement poison, i16 -32768, i32 0 @@ -260,6 +276,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v1 = add <2 x i64> %a0, %a1 %cmp = icmp ule <2 x i64> %a0, %v1 @@ -272,6 +289,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v1 = add %a0, %a1 %cmp = icmp ule %a0, %v1 @@ -289,6 +307,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v1 = add <2 x i64> %a0, %cmp = icmp ule <2 x i64> %a0, @@ -301,6 +320,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cm1 = insertelement poison, i64 6, i32 0 %splatcm1 = shufflevector %cm1, poison, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll --- a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ins1 = insertelement poison, i32 255, i32 0 %splat1 = shufflevector %ins1, poison, zeroinitializer @@ -27,6 +28,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 3 ; CHECK-NEXT: vand.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ins1 = insertelement poison, i64 7, i32 0 %splat1 = shufflevector %ins1, poison, zeroinitializer @@ -44,6 +46,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ins1 = insertelement poison, i64 1, i32 0 %splat1 = shufflevector %ins1, poison, zeroinitializer @@ -66,6 +69,7 @@ ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: vsll.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ins1 = insertelement poison, i32 2, i32 0 %splat1 = shufflevector %ins1, poison, zeroinitializer @@ -83,6 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ins1 = insertelement poison, i32 2, i32 0 %splat1 = shufflevector %ins1, poison, zeroinitializer @@ -100,6 +105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ins1 = insertelement poison, i16 2, i32 0 %splat1 = shufflevector %ins1, poison, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/combine-store-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/combine-store-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-store-fp.ll @@ -9,6 +9,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %addr1 = getelementptr float, float * %ptr, i64 1 %addr2 = getelementptr float, float * %ptr, i64 2 diff --git a/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll b/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll --- a/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll +++ b/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll @@ -34,6 +34,7 @@ ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: 
vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vv v8, v12, v18, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %v2 = shufflevector <8 x i16> %v0, <8 x i16> poison, <16 x i32> diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll --- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll +++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll @@ -22,6 +22,7 @@ ; RV32-NEXT: vand.vx v8, v8, a0 ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV32-NEXT: vnsrl.wi v8, v8, 0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: fixedlen: @@ -33,6 +34,7 @@ ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV64-NEXT: vnsrl.wi v8, v8, 0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v41 = insertelement <2 x i32> undef, i32 16, i32 0 %v42 = shufflevector <2 x i32> %v41, <2 x i32> undef, <2 x i32> zeroinitializer @@ -54,6 +56,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: lui a0, 1048568 ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v41 = insertelement undef, i32 16, i32 0 %v42 = shufflevector %v41, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll @@ -25,6 +25,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv1i8( %va, i1 false) ret %a @@ -54,6 +55,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv2i8( %va, i1 false) ret %a @@ -83,6 +85,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv4i8( %va, i1 false) ret %a @@ -112,6 +115,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv8i8( %va, i1 false) ret %a @@ -141,6 +145,7 @@ ; CHECK-NEXT: vsrl.vi v10, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv16i8( %va, i1 false) ret %a @@ -170,6 +175,7 @@ ; CHECK-NEXT: vsrl.vi v12, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv32i8( %va, i1 false) ret %a @@ -199,6 +205,7 @@ ; CHECK-NEXT: vsrl.vi v16, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv64i8( %va, i1 false) ret %a @@ -237,6 +244,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv1i16: @@ -270,6 +278,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv1i16( %va, i1 false) ret %a @@ -308,6 +317,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; 
RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv2i16: @@ -341,6 +351,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv2i16( %va, i1 false) ret %a @@ -379,6 +390,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv4i16: @@ -412,6 +424,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv4i16( %va, i1 false) ret %a @@ -450,6 +463,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv8i16: @@ -483,6 +497,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv8i16( %va, i1 false) ret %a @@ -521,6 +536,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv16i16: @@ -554,6 +570,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv16i16( %va, i1 false) ret %a @@ -592,6 +609,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv32i16: @@ -625,6 +643,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv32i16( %va, i1 false) ret %a @@ -666,6 +685,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv1i32: @@ -702,6 +722,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv1i32( %va, i1 false) ret %a @@ -743,6 +764,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv2i32: @@ -779,6 +801,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv2i32( %va, i1 false) ret %a @@ -820,6 +843,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv4i32: @@ -856,6 +880,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv4i32( %va, i1 false) ret %a @@ -897,6 +922,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv8i32: @@ -933,6 +959,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: 
ret %a = call @llvm.ctlz.nxv8i32( %va, i1 false) ret %a @@ -974,6 +1001,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv16i32: @@ -1010,6 +1038,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv16i32( %va, i1 false) ret %a @@ -1074,6 +1103,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv1i64: @@ -1136,6 +1166,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv1i64( %va, i1 false) ret %a @@ -1200,6 +1231,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv2i64: @@ -1262,6 +1294,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv2i64( %va, i1 false) ret %a @@ -1326,6 +1359,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv4i64: @@ -1388,6 +1422,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv4i64( %va, i1 false) ret %a @@ -1452,6 +1487,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_nxv8i64: @@ -1514,6 +1550,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv8i64( %va, i1 false) ret %a @@ -1543,6 +1580,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv1i8( %va, i1 true) ret %a @@ -1571,6 +1609,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv2i8( %va, i1 true) ret %a @@ -1599,6 +1638,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv4i8( %va, i1 true) ret %a @@ -1627,6 +1667,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv8i8( %va, i1 true) ret %a @@ -1655,6 +1696,7 @@ ; CHECK-NEXT: vsrl.vi v10, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv16i8( %va, i1 true) ret %a @@ -1683,6 +1725,7 @@ ; CHECK-NEXT: vsrl.vi v12, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv32i8( %va, i1 true) ret %a @@ -1711,6 +1754,7 @@ ; CHECK-NEXT: vsrl.vi 
v16, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctlz.nxv64i8( %va, i1 true) ret %a @@ -1748,6 +1792,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv1i16: @@ -1781,6 +1826,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv1i16( %va, i1 true) ret %a @@ -1818,6 +1864,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv2i16: @@ -1851,6 +1898,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv2i16( %va, i1 true) ret %a @@ -1888,6 +1936,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv4i16: @@ -1921,6 +1970,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv4i16( %va, i1 true) ret %a @@ -1958,6 +2008,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv8i16: @@ -1991,6 +2042,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv8i16( %va, i1 true) ret %a @@ -2028,6 +2080,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv16i16: @@ -2061,6 +2114,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv16i16( %va, i1 true) ret %a @@ -2098,6 +2152,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv32i16: @@ -2131,6 +2186,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv32i16( %va, i1 true) ret %a @@ -2171,6 +2227,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv1i32: @@ -2207,6 +2264,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv1i32( %va, i1 true) ret %a @@ -2247,6 +2305,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv2i32: @@ -2283,6 +2342,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: 
.cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv2i32( %va, i1 true) ret %a @@ -2323,6 +2383,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv4i32: @@ -2359,6 +2420,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv4i32( %va, i1 true) ret %a @@ -2399,6 +2461,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv8i32: @@ -2435,6 +2498,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv8i32( %va, i1 true) ret %a @@ -2475,6 +2539,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv16i32: @@ -2511,6 +2576,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv16i32( %va, i1 true) ret %a @@ -2574,6 +2640,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv1i64: @@ -2636,6 +2703,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv1i64( %va, i1 true) ret %a @@ -2699,6 +2767,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv2i64: @@ -2761,6 +2830,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv2i64( %va, i1 true) ret %a @@ -2824,6 +2894,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv4i64: @@ -2886,6 +2957,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv4i64( %va, i1 true) ret %a @@ -2949,6 +3021,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctlz_zero_undef_nxv8i64: @@ -3011,6 +3084,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctlz.nxv8i64( %va, i1 true) ret %a diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll @@ -18,6 +18,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctpop.nxv1i8( %va) ret %a @@ -40,6 +41,7 @@ ; CHECK-NEXT: 
vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctpop.nxv2i8( %va) ret %a @@ -62,6 +64,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctpop.nxv4i8( %va) ret %a @@ -84,6 +87,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctpop.nxv8i8( %va) ret %a @@ -106,6 +110,7 @@ ; CHECK-NEXT: vsrl.vi v10, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctpop.nxv16i8( %va) ret %a @@ -128,6 +133,7 @@ ; CHECK-NEXT: vsrl.vi v12, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctpop.nxv32i8( %va) ret %a @@ -150,6 +156,7 @@ ; CHECK-NEXT: vsrl.vi v16, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.ctpop.nxv64i8( %va) ret %a @@ -179,6 +186,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv1i16: @@ -203,6 +211,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv1i16( %va) ret %a @@ -232,6 +241,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv2i16: @@ -256,6 +266,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv2i16( %va) ret %a @@ -285,6 +296,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv4i16: @@ -309,6 +321,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv4i16( %va) ret %a @@ -338,6 +351,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv8i16: @@ -362,6 +376,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv8i16( %va) ret %a @@ -391,6 +406,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv16i16: @@ -415,6 +431,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv16i16( %va) ret %a @@ -444,6 +461,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv32i16: @@ -468,6 +486,7 @@ ; RV64-NEXT: addi a0, 
zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv32i16( %va) ret %a @@ -498,6 +517,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv1i32: @@ -523,6 +543,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv1i32( %va) ret %a @@ -553,6 +574,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv2i32: @@ -578,6 +600,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv2i32( %va) ret %a @@ -608,6 +631,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv4i32: @@ -633,6 +657,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv4i32( %va) ret %a @@ -663,6 +688,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv8i32: @@ -688,6 +714,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv8i32( %va) ret %a @@ -718,6 +745,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv16i32: @@ -743,6 +771,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv16i32( %va) ret %a @@ -793,6 +822,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv1i64: @@ -841,6 +871,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv1i64( %va) ret %a @@ -891,6 +922,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv2i64: @@ -939,6 +971,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv2i64( %va) ret %a @@ -989,6 +1022,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv4i64: @@ -1037,6 +1071,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv4i64( %va) ret %a @@ -1087,6 +1122,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 
+; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ctpop_nxv8i64: @@ -1135,6 +1171,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.ctpop.nxv8i64( %va) ret %a diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll @@ -22,6 +22,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv1i8( %va, i1 false) ret %a @@ -48,6 +49,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv2i8( %va, i1 false) ret %a @@ -74,6 +76,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv4i8( %va, i1 false) ret %a @@ -100,6 +103,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv8i8( %va, i1 false) ret %a @@ -126,6 +130,7 @@ ; CHECK-NEXT: vsrl.vi v10, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv16i8( %va, i1 false) ret %a @@ -152,6 +157,7 @@ ; CHECK-NEXT: vsrl.vi v12, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv32i8( %va, i1 false) ret %a @@ -178,6 +184,7 @@ ; CHECK-NEXT: vsrl.vi v16, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv64i8( %va, i1 false) ret %a @@ -211,6 +218,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv1i16: @@ -239,6 +247,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv1i16( %va, i1 false) ret %a @@ -272,6 +281,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv2i16: @@ -300,6 +310,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv2i16( %va, i1 false) ret %a @@ -333,6 +344,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv4i16: @@ -361,6 +373,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv4i16( %va, i1 false) ret %a @@ -394,6 +407,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv8i16: @@ -422,6 +436,7 @@ ; RV64-NEXT: 
addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv8i16( %va, i1 false) ret %a @@ -455,6 +470,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv16i16: @@ -483,6 +499,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv16i16( %va, i1 false) ret %a @@ -516,6 +533,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv32i16: @@ -544,6 +562,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv32i16( %va, i1 false) ret %a @@ -578,6 +597,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv1i32: @@ -607,6 +627,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv1i32( %va, i1 false) ret %a @@ -641,6 +662,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv2i32: @@ -670,6 +692,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv2i32( %va, i1 false) ret %a @@ -704,6 +727,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv4i32: @@ -733,6 +757,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv4i32( %va, i1 false) ret %a @@ -767,6 +792,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv8i32: @@ -796,6 +822,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv8i32( %va, i1 false) ret %a @@ -830,6 +857,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv16i32: @@ -859,6 +887,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv16i32( %va, i1 false) ret %a @@ -913,6 +942,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv1i64: @@ -965,6 +995,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv1i64( %va, i1 false) ret %a @@ -1019,6 +1050,7 @@ ; 
RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv2i64: @@ -1071,6 +1103,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv2i64( %va, i1 false) ret %a @@ -1125,6 +1158,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv4i64: @@ -1177,6 +1211,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv4i64( %va, i1 false) ret %a @@ -1231,6 +1266,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_nxv8i64: @@ -1283,6 +1319,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv8i64( %va, i1 false) ret %a @@ -1309,6 +1346,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv1i8( %va, i1 true) ret %a @@ -1334,6 +1372,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv2i8( %va, i1 true) ret %a @@ -1359,6 +1398,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv4i8( %va, i1 true) ret %a @@ -1384,6 +1424,7 @@ ; CHECK-NEXT: vsrl.vi v9, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv8i8( %va, i1 true) ret %a @@ -1409,6 +1450,7 @@ ; CHECK-NEXT: vsrl.vi v10, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv16i8( %va, i1 true) ret %a @@ -1434,6 +1476,7 @@ ; CHECK-NEXT: vsrl.vi v12, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv32i8( %va, i1 true) ret %a @@ -1459,6 +1502,7 @@ ; CHECK-NEXT: vsrl.vi v16, v8, 4 ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call @llvm.cttz.nxv64i8( %va, i1 true) ret %a @@ -1491,6 +1535,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv1i16: @@ -1519,6 +1564,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv1i16( %va, i1 true) ret %a @@ -1551,6 +1597,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv2i16: @@ -1579,6 +1626,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; 
RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv2i16( %va, i1 true) ret %a @@ -1611,6 +1659,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv4i16: @@ -1639,6 +1688,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv4i16( %va, i1 true) ret %a @@ -1671,6 +1721,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv8i16: @@ -1699,6 +1750,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv8i16( %va, i1 true) ret %a @@ -1731,6 +1783,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv16i16: @@ -1759,6 +1812,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv16i16( %va, i1 true) ret %a @@ -1791,6 +1845,7 @@ ; RV32-NEXT: addi a0, zero, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv32i16: @@ -1819,6 +1874,7 @@ ; RV64-NEXT: addi a0, zero, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv32i16( %va, i1 true) ret %a @@ -1852,6 +1908,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv1i32: @@ -1881,6 +1938,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv1i32( %va, i1 true) ret %a @@ -1914,6 +1972,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv2i32: @@ -1943,6 +2002,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv2i32( %va, i1 true) ret %a @@ -1976,6 +2036,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv4i32: @@ -2005,6 +2066,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv4i32( %va, i1 true) ret %a @@ -2038,6 +2100,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv8i32: @@ -2067,6 +2130,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call 
@llvm.cttz.nxv8i32( %va, i1 true) ret %a @@ -2100,6 +2164,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv16i32: @@ -2129,6 +2194,7 @@ ; RV64-NEXT: addiw a0, a0, 257 ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv16i32( %va, i1 true) ret %a @@ -2182,6 +2248,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv1i64: @@ -2234,6 +2301,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv1i64( %va, i1 true) ret %a @@ -2287,6 +2355,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv2i64: @@ -2339,6 +2408,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv2i64( %va, i1 true) ret %a @@ -2392,6 +2462,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv4i64: @@ -2444,6 +2515,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv4i64( %va, i1 true) ret %a @@ -2497,6 +2569,7 @@ ; RV32-NEXT: addi a0, zero, 56 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: cttz_zero_undef_nxv8i64: @@ -2549,6 +2622,7 @@ ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 56 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = call @llvm.cttz.nxv8i64( %va, i1 true) ret %a diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir --- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir +++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir @@ -175,8 +175,23 @@ ; CHECK-NEXT: $x18 = LD $x2, 2000 :: (load (s64) from %stack.6) ; CHECK-NEXT: $x9 = LD $x2, 2008 :: (load (s64) from %stack.5) ; CHECK-NEXT: $x8 = LD $x2, 2016 :: (load (s64) from %stack.4) + ; CHECK-NEXT: CFI_INSTRUCTION def_cfa $x2, 2032 ; CHECK-NEXT: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3) + ; CHECK-NEXT: CFI_INSTRUCTION restore $x1 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x8 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x9 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x18 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x19 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x20 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x21 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x22 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x23 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x24 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x25 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x26 + ; CHECK-NEXT: CFI_INSTRUCTION restore $x27 ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 2032 + ; CHECK-NEXT: CFI_INSTRUCTION def_cfa_offset 0 ; CHECK-NEXT: PseudoRET bb.0: successors: %bb.1, %bb.2 diff --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll 
b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll @@ -9,6 +9,7 @@ ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -22,6 +23,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -35,6 +37,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -48,6 +51,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -61,6 +65,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -74,6 +79,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -87,6 +93,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -100,6 +107,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -113,6 +121,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -126,6 +135,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -139,6 +149,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -152,6 +163,7 @@ ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -165,6 +177,7 @@ ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -178,6 +191,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -191,6 +205,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -204,6 +219,7 @@ ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf4 
v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -217,6 +233,7 @@ ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -230,6 +247,7 @@ ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -243,6 +261,7 @@ ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -255,6 +274,7 @@ ; CHECK-NEXT: vl1r.v v10, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -267,6 +287,7 @@ ; CHECK-NEXT: vl1r.v v10, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -279,6 +300,7 @@ ; CHECK-NEXT: vl1r.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -291,6 +313,7 @@ ; CHECK-NEXT: vl1r.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -303,6 +326,7 @@ ; CHECK-NEXT: vl1r.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -315,6 +339,7 @@ ; CHECK-NEXT: vl1r.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -327,6 +352,7 @@ ; CHECK-NEXT: vl2r.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -339,6 +365,7 @@ ; CHECK-NEXT: vl2r.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -351,6 +378,7 @@ ; CHECK-NEXT: vl2r.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -363,6 +391,7 @@ ; CHECK-NEXT: vl2r.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -375,6 +404,7 @@ ; CHECK-NEXT: vl4r.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -387,6 +417,7 @@ ; CHECK-NEXT: vl4r.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -400,6 +431,7 @@ ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v8, v8, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %y = trunc %x to store %y, * %z @@ -412,6 +444,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -425,6 +458,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -438,6 +472,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -451,6 +486,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -464,6 +500,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -476,6 +513,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -489,6 +527,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -502,6 +541,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -515,6 +555,7 @@ ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -528,6 +569,7 @@ ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -540,6 +582,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -552,6 +595,7 @@ ; CHECK-NEXT: vl1re16.v v10, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -564,6 +608,7 @@ ; CHECK-NEXT: vl1re16.v v10, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -576,6 +621,7 @@ ; CHECK-NEXT: vl1re16.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -588,6 +634,7 @@ ; CHECK-NEXT: vl1re16.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -600,6 +647,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store 
%y, * %z @@ -612,6 +660,7 @@ ; CHECK-NEXT: vl2re16.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -624,6 +673,7 @@ ; CHECK-NEXT: vl2re16.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -636,6 +686,7 @@ ; CHECK-NEXT: vl2re16.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -648,6 +699,7 @@ ; CHECK-NEXT: vl2re16.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -660,6 +712,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vs2r.v v12, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -672,6 +725,7 @@ ; CHECK-NEXT: vl4re16.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -684,6 +738,7 @@ ; CHECK-NEXT: vl4re16.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -696,6 +751,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vs4r.v v16, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -710,6 +766,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -722,6 +779,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -735,6 +793,7 @@ ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -748,6 +807,7 @@ ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -762,6 +822,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -774,6 +835,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -786,6 +848,7 @@ ; CHECK-NEXT: vl1re32.v v10, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -798,6 +861,7 @@ ; CHECK-NEXT: vl1re32.v v10, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -812,6 
+876,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -824,6 +889,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -836,6 +902,7 @@ ; CHECK-NEXT: vl2re32.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -848,6 +915,7 @@ ; CHECK-NEXT: vl2re32.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -862,6 +930,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vs1r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -874,6 +943,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vs2r.v v12, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -886,6 +956,7 @@ ; CHECK-NEXT: vl4re32.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = sext %y to @@ -898,6 +969,7 @@ ; CHECK-NEXT: vl4re32.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = zext %y to @@ -912,6 +984,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: vs2r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -924,6 +997,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vs4r.v v16, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -940,6 +1014,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -954,6 +1029,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -966,6 +1042,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -982,6 +1059,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -996,6 +1074,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -1008,6 +1087,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -1024,6 +1104,7 @@ ; 
CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -1038,6 +1119,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vs1r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -1050,6 +1132,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vs2r.v v12, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -1066,6 +1149,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -1080,6 +1164,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: vs2r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -1092,6 +1177,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vs4r.v v16, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z @@ -1104,6 +1190,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1118,6 +1205,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1130,6 +1218,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1144,6 +1233,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1156,6 +1246,7 @@ ; CHECK-NEXT: vl1re16.v v10, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1170,6 +1261,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1182,6 +1274,7 @@ ; CHECK-NEXT: vl2re16.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1196,6 +1289,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1208,6 +1302,7 @@ ; CHECK-NEXT: vl4re16.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1220,6 +1315,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vse16.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %y = fptrunc %x to store %y, * %z @@ -1232,6 +1328,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1244,6 +1341,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vse16.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -1256,6 +1354,7 @@ ; CHECK-NEXT: vl1re32.v v10, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1268,6 +1367,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -1280,6 +1380,7 @@ ; CHECK-NEXT: vl2re32.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1292,6 +1393,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vs2r.v v12, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -1304,6 +1406,7 @@ ; CHECK-NEXT: vl4re32.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -1316,6 +1419,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vs4r.v v16, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -1330,6 +1434,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -1342,6 +1447,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -1356,6 +1462,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -1368,6 +1475,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -1382,6 +1490,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v12 ; CHECK-NEXT: vs1r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -1394,6 +1503,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vs2r.v v12, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -1408,6 +1518,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v16 ; CHECK-NEXT: vs2r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -1420,6 +1531,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vs4r.v 
v16, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll @@ -6,6 +6,7 @@ ; CHECK-LABEL: extract_nxv8i32_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( %vec, i64 0) ret %c @@ -15,6 +16,7 @@ ; CHECK-LABEL: extract_nxv8i32_nxv4i32_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( %vec, i64 4) ret %c @@ -24,6 +26,7 @@ ; CHECK-LABEL: extract_nxv8i32_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( %vec, i64 0) ret %c @@ -33,6 +36,7 @@ ; CHECK-LABEL: extract_nxv8i32_nxv2i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( %vec, i64 2) ret %c @@ -42,6 +46,7 @@ ; CHECK-LABEL: extract_nxv8i32_nxv2i32_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( %vec, i64 4) ret %c @@ -51,6 +56,7 @@ ; CHECK-LABEL: extract_nxv8i32_nxv2i32_6: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( %vec, i64 6) ret %c @@ -60,6 +66,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( %vec, i64 0) ret %c @@ -69,6 +76,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv8i32_8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( %vec, i64 8) ret %c @@ -78,6 +86,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %vec, i64 0) ret %c @@ -87,6 +96,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv4i32_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %vec, i64 4) ret %c @@ -96,6 +106,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv4i32_8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %vec, i64 8) ret %c @@ -105,6 +116,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv4i32_12: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %vec, i64 12) ret %c @@ -114,6 +126,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 0) ret %c @@ -123,6 +136,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv2i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 2) ret %c @@ -132,6 +146,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv2i32_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 4) ret %c @@ -141,6 +156,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv2i32_6: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 6) ret %c @@ -150,6 +166,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv2i32_8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 8) ret %c @@ -159,6 +176,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv2i32_10: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 10) ret %c @@ -168,6 +186,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv2i32_12: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 12) ret %c @@ -177,6 +196,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv2i32_14: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 14) ret %c @@ -186,6 +206,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv1i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( %vec, i64 0) ret %c @@ -198,6 +219,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( %vec, i64 1) ret %c @@ -210,6 +232,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( %vec, i64 3) ret %c @@ -222,6 +245,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v15, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( %vec, i64 15) ret %c @@ -231,6 +255,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv1i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( %vec, i64 2) ret %c @@ -239,6 +264,7 @@ define @extract_nxv2i32_nxv1i32_0( %vec) { ; CHECK-LABEL: extract_nxv2i32_nxv1i32_0: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( %vec, i64 0) ret %c @@ -248,6 +274,7 @@ ; CHECK-LABEL: extract_nxv32i8_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: # kill: def $v8 killed 
$v8 killed $v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 0) ret %c @@ -260,6 +287,7 @@ ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 2) ret %c @@ -272,6 +300,7 @@ ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 4) ret %c @@ -286,6 +315,7 @@ ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 6) ret %c @@ -295,6 +325,7 @@ ; CHECK-LABEL: extract_nxv32i8_nxv2i8_8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 8) ret %c @@ -309,6 +340,7 @@ ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 22) ret %c @@ -323,6 +355,7 @@ ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( %vec, i64 7) ret %c @@ -337,6 +370,7 @@ ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( %vec, i64 3) ret %c @@ -346,6 +380,7 @@ ; CHECK-LABEL: extract_nxv2f16_nxv16f16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2f16.nxv16f16( %vec, i64 0) ret %c @@ -358,6 +393,7 @@ ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2f16.nxv16f16( %vec, i64 2) ret %c @@ -367,6 +403,7 @@ ; CHECK-LABEL: extract_nxv2f16_nxv16f16_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2f16.nxv16f16( %vec, i64 4) ret %c @@ -375,6 +412,7 @@ define @extract_nxv64i1_nxv8i1_0( %mask) { ; CHECK-LABEL: extract_nxv64i1_nxv8i1_0: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv8i1( %mask, i64 0) ret %c @@ -387,6 +425,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v0, v0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv8i1( %mask, i64 8) ret %c @@ -395,6 +434,7 @@ define @extract_nxv64i1_nxv2i1_0( %mask) { ; CHECK-LABEL: extract_nxv64i1_nxv2i1_0: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i1( 
%mask, i64 0) ret %c @@ -412,6 +452,7 @@ ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv2i1( %mask, i64 2) ret %c @@ -420,6 +461,7 @@ define @extract_nxv4i1_nxv32i1_0( %x) { ; CHECK-LABEL: extract_nxv4i1_nxv32i1_0: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv4i1( %x, i64 0) ret %c @@ -437,6 +479,7 @@ ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv4i1( %x, i64 4) ret %c @@ -445,6 +488,7 @@ define @extract_nxv16i1_nxv32i1_0( %x) { ; CHECK-LABEL: extract_nxv16i1_nxv32i1_0: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv16i1( %x, i64 0) ret %c @@ -457,6 +501,7 @@ ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v0, v0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call @llvm.experimental.vector.extract.nxv16i1( %x, i64 16) ret %c @@ -469,6 +514,7 @@ ; CHECK-LABEL: extract_nxv6f16_nxv12f16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.extract.nxv6f16.nxv12f16( %in, i64 0) ret %res @@ -489,6 +535,7 @@ ; CHECK-NEXT: vslideup.vx v13, v8, a0 ; CHECK-NEXT: vslideup.vx v12, v10, a0 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.extract.nxv6f16.nxv12f16( %in, i64 6) ret %res diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -18,6 +19,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -29,6 +31,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -39,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -50,6 +54,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -61,6 +66,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -71,6 +77,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu ; CHECK-NEXT: vfmv.f.s 
fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -82,6 +89,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -93,6 +101,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -103,6 +112,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -114,6 +124,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -125,6 +136,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -135,6 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -146,6 +159,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -157,6 +171,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -167,6 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -178,6 +194,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -189,6 +206,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -199,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret float %r @@ -210,6 +229,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret float %r @@ -221,6 +241,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret float %r @@ -231,6 +252,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret float %r @@ -242,6 
+264,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret float %r @@ -253,6 +276,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret float %r @@ -263,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret float %r @@ -274,6 +299,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret float %r @@ -285,6 +311,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret float %r @@ -295,6 +322,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret float %r @@ -306,6 +334,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret float %r @@ -317,6 +346,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret float %r @@ -327,6 +357,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret float %r @@ -338,6 +369,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret float %r @@ -349,6 +381,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret float %r @@ -359,6 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret double %r @@ -370,6 +404,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret double %r @@ -381,6 +416,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret double %r @@ -391,6 +427,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret double %r @@ -402,6 +439,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; 
CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret double %r @@ -413,6 +451,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret double %r @@ -423,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret double %r @@ -434,6 +474,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret double %r @@ -445,6 +486,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret double %r @@ -455,6 +497,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret double %r @@ -466,6 +509,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret double %r @@ -477,6 +521,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret double %r diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -18,6 +19,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -29,6 +31,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -39,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -50,6 +54,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -61,6 +66,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -71,6 +77,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r 
@@ -82,6 +89,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -93,6 +101,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -103,6 +112,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -114,6 +124,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -125,6 +136,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -135,6 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -146,6 +159,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -157,6 +171,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -167,6 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret half %r @@ -178,6 +194,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret half %r @@ -189,6 +206,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret half %r @@ -199,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret float %r @@ -210,6 +229,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret float %r @@ -221,6 +241,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret float %r @@ -231,6 +252,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret float %r @@ -242,6 +264,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: 
vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret float %r @@ -253,6 +276,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret float %r @@ -263,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret float %r @@ -274,6 +299,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret float %r @@ -285,6 +311,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret float %r @@ -295,6 +322,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret float %r @@ -306,6 +334,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret float %r @@ -317,6 +346,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret float %r @@ -327,6 +357,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret float %r @@ -338,6 +369,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret float %r @@ -349,6 +381,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret float %r @@ -359,6 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret double %r @@ -370,6 +404,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret double %r @@ -381,6 +416,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret double %r @@ -391,6 +427,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret double %r @@ -402,6 +439,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret 
double %r @@ -413,6 +451,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret double %r @@ -423,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret double %r @@ -434,6 +474,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret double %r @@ -445,6 +486,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret double %r @@ -455,6 +497,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret double %r @@ -466,6 +509,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret double %r @@ -477,6 +521,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret double %r diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -18,6 +19,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -29,6 +31,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -39,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -50,6 +54,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -61,6 +66,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -71,6 +77,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -82,6 +89,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -93,6 +101,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -103,6 +112,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -114,6 +124,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -125,6 +136,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -135,6 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -146,6 +159,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -157,6 +171,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -167,6 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -178,6 +194,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -189,6 +206,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -199,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -210,6 +229,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -221,6 +241,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -231,6 +252,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -242,6 +264,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -253,6 +276,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; 
CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -263,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -274,6 +299,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -285,6 +311,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -295,6 +322,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -306,6 +334,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -317,6 +346,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -327,6 +357,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -338,6 +369,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -349,6 +381,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -359,6 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -370,6 +404,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -381,6 +416,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -391,6 +427,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -402,6 +439,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -413,6 +451,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -423,6 +462,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i32 %r @@ -434,6 +474,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i32 %r @@ -445,6 +486,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i32 %r @@ -455,6 +497,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i32 %r @@ -466,6 +509,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i32 %r @@ -477,6 +521,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i32 %r @@ -487,6 +532,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i32 %r @@ -498,6 +544,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i32 %r @@ -509,6 +556,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i32 %r @@ -519,6 +567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i32 %r @@ -530,6 +579,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i32 %r @@ -541,6 +591,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i32 %r @@ -551,6 +602,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i32 %r @@ -562,6 +614,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i32 %r @@ -573,6 +626,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i32 %r @@ -586,6 +640,7 @@ ; CHECK-NEXT: vsrl.vx v9, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i64 %r 
@@ -600,6 +655,7 @@ ; CHECK-NEXT: addi a1, zero, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i64 %r @@ -614,6 +670,7 @@ ; CHECK-NEXT: addi a1, zero, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i64 %r @@ -627,6 +684,7 @@ ; CHECK-NEXT: vsrl.vx v10, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i64 %r @@ -641,6 +699,7 @@ ; CHECK-NEXT: addi a1, zero, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i64 %r @@ -655,6 +714,7 @@ ; CHECK-NEXT: addi a1, zero, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i64 %r @@ -668,6 +728,7 @@ ; CHECK-NEXT: vsrl.vx v12, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i64 %r @@ -682,6 +743,7 @@ ; CHECK-NEXT: addi a1, zero, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i64 %r @@ -696,6 +758,7 @@ ; CHECK-NEXT: addi a1, zero, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i64 %r @@ -709,6 +772,7 @@ ; CHECK-NEXT: vsrl.vx v16, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i64 %r @@ -723,6 +787,7 @@ ; CHECK-NEXT: addi a1, zero, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i64 %r @@ -737,6 +802,7 @@ ; CHECK-NEXT: addi a1, zero, 32 ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i64 %r diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -18,6 +19,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -29,6 +31,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -39,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -50,6 +54,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, 
v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -61,6 +66,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -71,6 +77,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -82,6 +89,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -93,6 +101,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -103,6 +112,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -114,6 +124,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -125,6 +136,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -135,6 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -146,6 +159,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -157,6 +171,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -167,6 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -178,6 +194,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -189,6 +206,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -199,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i8 %r @@ -210,6 +229,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i8 %r @@ -221,6 +241,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, 
a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i8 %r @@ -231,6 +252,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -242,6 +264,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -253,6 +276,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -263,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -274,6 +299,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -285,6 +311,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -295,6 +322,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -306,6 +334,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -317,6 +346,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -327,6 +357,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -338,6 +369,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -349,6 +381,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -359,6 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -370,6 +404,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -381,6 +416,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -391,6 +427,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i16 %r @@ -402,6 +439,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i16 %r @@ -413,6 +451,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i16 %r @@ -423,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i32 %r @@ -434,6 +474,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i32 %r @@ -445,6 +486,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i32 %r @@ -455,6 +497,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i32 %r @@ -466,6 +509,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i32 %r @@ -477,6 +521,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i32 %r @@ -487,6 +532,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i32 %r @@ -498,6 +544,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i32 %r @@ -509,6 +556,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i32 %r @@ -519,6 +567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i32 %r @@ -530,6 +579,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i32 %r @@ -541,6 +591,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i32 %r @@ -551,6 +602,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 
0 ret i32 %r @@ -562,6 +614,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i32 %r @@ -573,6 +626,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i32 %r @@ -583,6 +637,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i64 %r @@ -594,6 +649,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i64 %r @@ -605,6 +661,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i64 %r @@ -615,6 +672,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i64 %r @@ -626,6 +684,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i64 %r @@ -637,6 +696,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i64 %r @@ -647,6 +707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i64 %r @@ -658,6 +719,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i64 %r @@ -669,6 +731,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i64 %r @@ -679,6 +742,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 ret i64 %r @@ -690,6 +754,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 2 ret i64 %r @@ -701,6 +766,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = extractelement %v, i32 %idx ret i64 %r diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll @@ -47,6 +47,7 @@ ; CHECK-ASM-NEXT: addi a1, a1, 160 ; CHECK-ASM-NEXT: bne a2, a5, 
.LBB0_1 ; CHECK-ASM-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret entry: br label %vector.body @@ -117,6 +118,7 @@ ; CHECK-ASM-NEXT: addi a1, a1, 160 ; CHECK-ASM-NEXT: bne a2, a5, .LBB1_1 ; CHECK-ASM-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret entry: br label %vector.body @@ -183,6 +185,7 @@ ; CHECK-ASM-NEXT: addi a1, a1, 160 ; CHECK-ASM-NEXT: bne a2, a5, .LBB2_1 ; CHECK-ASM-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret entry: br label %vector.body @@ -247,6 +250,7 @@ ; CHECK-ASM-NEXT: addi a1, a1, 160 ; CHECK-ASM-NEXT: bne a2, a4, .LBB3_1 ; CHECK-ASM-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret entry: br label %vector.body @@ -318,6 +322,7 @@ ; CHECK-ASM-NEXT: addi a0, a0, 160 ; CHECK-ASM-NEXT: bne a2, a5, .LBB4_1 ; CHECK-ASM-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret entry: br label %vector.body @@ -389,6 +394,7 @@ ; CHECK-ASM-NEXT: addi a0, a0, 160 ; CHECK-ASM-NEXT: bne a2, a5, .LBB5_1 ; CHECK-ASM-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret entry: br label %vector.body @@ -460,6 +466,7 @@ ; CHECK-ASM-NEXT: addi a1, a1, 128 ; CHECK-ASM-NEXT: bnez a2, .LBB6_1 ; CHECK-ASM-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret entry: br label %vector.body @@ -533,6 +540,7 @@ ; CHECK-ASM-NEXT: addi a0, a0, 128 ; CHECK-ASM-NEXT: bnez a2, .LBB7_1 ; CHECK-ASM-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret entry: br label %vector.body @@ -623,6 +631,7 @@ ; CHECK-ASM-NEXT: addi a1, a1, 256 ; CHECK-ASM-NEXT: bnez a2, .LBB8_1 ; CHECK-ASM-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret entry: br label %vector.body @@ -775,6 +784,7 @@ ; CHECK-ASM-NEXT: addi a0, a0, 128 ; CHECK-ASM-NEXT: bnez a2, .LBB9_1 ; CHECK-ASM-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret entry: br label %vector.body @@ -873,6 +883,7 @@ ; CHECK-ASM-NEXT: addi a1, a1, 160 ; CHECK-ASM-NEXT: bnez a2, .LBB10_1 ; CHECK-ASM-NEXT: # %bb.2: +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret br label %3 @@ -949,6 +960,7 @@ ; CHECK-ASM-NEXT: addi a0, a0, 160 ; CHECK-ASM-NEXT: bnez a2, .LBB11_1 ; CHECK-ASM-NEXT: # %bb.2: +; CHECK-ASM-NEXT: .cfi_def_cfa_offset 0 ; CHECK-ASM-NEXT: ret br label %3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false) @@ -28,6 +29,7 @@ ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false) @@ -44,6 +46,7 @@ ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 
x i32>* %x %b = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false) @@ -60,6 +63,7 @@ ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 false) @@ -77,6 +81,7 @@ ; LMULMAX2-NEXT: vrsub.vi v10, v8, 0 ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: abs_v32i8: @@ -91,6 +96,7 @@ ; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vse8.v v9, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v8, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: abs_v32i8: @@ -105,6 +111,7 @@ ; LMULMAX1-RV64-NEXT: vmax.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vse8.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false) @@ -121,6 +128,7 @@ ; LMULMAX2-NEXT: vrsub.vi v10, v8, 0 ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: abs_v16i16: @@ -135,6 +143,7 @@ ; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vse16.v v9, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v8, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: abs_v16i16: @@ -149,6 +158,7 @@ ; LMULMAX1-RV64-NEXT: vmax.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vse16.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false) @@ -165,6 +175,7 @@ ; LMULMAX2-NEXT: vrsub.vi v10, v8, 0 ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: abs_v8i32: @@ -179,6 +190,7 @@ ; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vse32.v v9, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v8, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: abs_v8i32: @@ -193,6 +205,7 @@ ; LMULMAX1-RV64-NEXT: vmax.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vse32.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false) @@ -209,6 +222,7 @@ ; LMULMAX2-NEXT: vrsub.vi v10, v8, 0 ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: abs_v4i64: @@ -223,6 +237,7 @@ ; LMULMAX1-RV32-NEXT: vmax.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vse64.v v9, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v8, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: abs_v4i64: @@ -237,6 +252,7 @@ ; LMULMAX1-RV64-NEXT: vmax.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vse64.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll @@ -13,6 +13,7 @@ ; VLEN256-NEXT: vle8.v v0, (a1) ; VLEN256-NEXT: vadd.vv v8, v24, v8 ; VLEN256-NEXT: vadd.vv v16, v0, v16 +; VLEN256-NEXT: .cfi_def_cfa_offset 0 ; VLEN256-NEXT: ret ; ; VLEN512-LABEL: bitcast_1024B: @@ -20,6 +21,7 @@ ; VLEN512-NEXT: addi a0, zero, 512 ; VLEN512-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; VLEN512-NEXT: vadd.vv v8, v16, v8 +; VLEN512-NEXT: .cfi_def_cfa_offset 0 ; VLEN512-NEXT: ret ; ; VLEN1024-LABEL: bitcast_1024B: @@ -27,6 +29,7 @@ ; VLEN1024-NEXT: addi a0, zero, 512 ; VLEN1024-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; VLEN1024-NEXT: vadd.vv v8, v12, v8 +; VLEN1024-NEXT: .cfi_def_cfa_offset 0 ; VLEN1024-NEXT: ret %c = bitcast <256 x i16> %a to <512 x i8> %v = add <512 x i8> %b, %c diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll @@ -8,6 +8,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = bitcast <4 x i8> %a to <32 x i1> %d = xor <32 x i1> %b, %c @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <1 x i8> %a to i8 ret i8 %b @@ -29,6 +31,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <2 x i8> %a to i16 ret i16 %b @@ -39,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <1 x i16> %a to i16 ret i16 %b @@ -49,6 +53,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <4 x i8> %a to i32 ret i32 %b @@ -59,6 +64,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <2 x i16> %a to i32 ret i32 %b @@ -69,6 +75,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <1 x i32> %a to i32 ret i32 %b @@ -82,12 +89,14 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_v8i8_i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast <8 x i8> %a to i64 ret i64 %b @@ -101,12 +110,14 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_v4i16_i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast <4 x i16> %a to i64 ret i64 %b @@ -120,12 +131,14 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: 
vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_v2i32_i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast <2 x i32> %a to i64 ret i64 %b @@ -139,12 +152,14 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_v1i64_i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast <1 x i64> %a to i64 ret i64 %b @@ -155,6 +170,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <2 x i8> %a to half ret half %b @@ -165,6 +181,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <1 x i16> %a to half ret half %b @@ -175,6 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <4 x i8> %a to float ret float %b @@ -185,6 +203,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <2 x i16> %a to float ret float %b @@ -195,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <1 x i32> %a to float ret float %b @@ -208,12 +228,14 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_v8i8_f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast <8 x i8> %a to double ret double %b @@ -227,12 +249,14 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_v4i16_f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast <4 x i16> %a to double ret double %b @@ -246,12 +270,14 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_v2i32_f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast <2 x i32> %a to double ret double %b @@ -265,12 +291,14 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_v1i64_f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast <1 x i64> %a to double ret double %b @@ -281,6 +309,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast i16 %a to <1 x i16> ret <1 x 
i16> %b @@ -291,12 +320,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-NEXT: vmv.s.x v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_i32_v2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-NEXT: vmv.v.x v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast i32 %a to <2 x i16> ret <2 x i16> %b @@ -307,12 +338,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-NEXT: vmv.s.x v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_i32_v1i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-NEXT: vmv.v.x v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast i32 %a to <1 x i32> ret <1 x i32> %b @@ -327,12 +360,14 @@ ; RV32-NEXT: vslide1up.vx v10, v9, a0 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vslideup.vi v8, v10, 0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_i64_v4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.s.x v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast i64 %a to <4 x i16> ret <4 x i16> %b @@ -347,12 +382,14 @@ ; RV32-NEXT: vslide1up.vx v10, v9, a0 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vslideup.vi v8, v10, 0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_i64_v2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.s.x v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast i64 %a to <2 x i32> ret <2 x i32> %b @@ -367,12 +404,14 @@ ; RV32-NEXT: vslide1up.vx v10, v9, a0 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vslideup.vi v8, v10, 0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_i64_v1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.s.x v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %b = bitcast i64 %a to <1 x i64> ret <1 x i64> %b diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll @@ -34,6 +34,7 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV32-NEXT: vor.vv v8, v9, v8 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: bitreverse_v8i16: @@ -65,6 +66,7 @@ ; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV64-NEXT: vor.vv v8, v9, v8 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bitreverse_v8i16: @@ -96,6 +98,7 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v8 ; LMULMAX1-RV32-NEXT: vor.vv v8, v9, v8 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bitreverse_v8i16: @@ -127,6 +130,7 @@ ; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v8 ; LMULMAX1-RV64-NEXT: vor.vv v8, v9, v8 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -175,6 +179,7 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV32-NEXT: vor.vv v8, v9, v8 ; LMULMAX2-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 
0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: bitreverse_v4i32: @@ -215,6 +220,7 @@ ; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV64-NEXT: vor.vv v8, v9, v8 ; LMULMAX2-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bitreverse_v4i32: @@ -255,6 +261,7 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v8 ; LMULMAX1-RV32-NEXT: vor.vv v8, v9, v8 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bitreverse_v4i32: @@ -295,6 +302,7 @@ ; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v8 ; LMULMAX1-RV64-NEXT: vor.vv v8, v9, v8 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -387,6 +395,7 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV32-NEXT: vor.vv v8, v9, v8 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: bitreverse_v2i64: @@ -464,6 +473,7 @@ ; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV64-NEXT: vor.vv v8, v9, v8 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bitreverse_v2i64: @@ -548,6 +558,7 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v8 ; LMULMAX1-RV32-NEXT: vor.vv v8, v9, v8 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bitreverse_v2i64: @@ -625,6 +636,7 @@ ; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v8 ; LMULMAX1-RV64-NEXT: vor.vv v8, v9, v8 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -664,6 +676,7 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: bitreverse_v16i16: @@ -695,6 +708,7 @@ ; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bitreverse_v16i16: @@ -747,6 +761,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9 ; LMULMAX1-RV32-NEXT: vse16.v v9, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v8, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bitreverse_v16i16: @@ -799,6 +814,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9 ; LMULMAX1-RV64-NEXT: vse16.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -847,6 +863,7 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8 ; LMULMAX2-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: bitreverse_v8i32: @@ -887,6 +904,7 @@ ; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8 ; LMULMAX2-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bitreverse_v8i32: @@ -954,6 +972,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v9, v10, v9 ; LMULMAX1-RV32-NEXT: vse32.v 
v9, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v8, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bitreverse_v8i32: @@ -1021,6 +1040,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v9, v10, v9 ; LMULMAX1-RV64-NEXT: vse32.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -1113,6 +1133,7 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV32-NEXT: vor.vv v8, v10, v8 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: bitreverse_v4i64: @@ -1190,6 +1211,7 @@ ; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v8 ; LMULMAX2-RV64-NEXT: vor.vv v8, v10, v8 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bitreverse_v4i64: @@ -1313,6 +1335,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v9, v8 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v12, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bitreverse_v4i64: @@ -1429,6 +1452,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v10, v8 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a7) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll @@ -13,6 +13,7 @@ ; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 8 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: bswap_v8i16: @@ -23,6 +24,7 @@ ; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 8 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bswap_v8i16: @@ -33,6 +35,7 @@ ; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 8 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bswap_v8i16: @@ -43,6 +46,7 @@ ; LMULMAX1-RV64-NEXT: vsll.vi v8, v8, 8 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -70,6 +74,7 @@ ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: bswap_v4i32: @@ -89,6 +94,7 @@ ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bswap_v4i32: @@ -108,6 +114,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bswap_v4i32: @@ -127,6 +134,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 ; 
LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -189,6 +197,7 @@ ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: bswap_v2i64: @@ -227,6 +236,7 @@ ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bswap_v2i64: @@ -281,6 +291,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bswap_v2i64: @@ -319,6 +330,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -337,6 +349,7 @@ ; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 8 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: bswap_v16i16: @@ -347,6 +360,7 @@ ; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 8 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bswap_v16i16: @@ -363,6 +377,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vse16.v v9, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v8, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bswap_v16i16: @@ -379,6 +394,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vse16.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -406,6 +422,7 @@ ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v12 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: bswap_v8i32: @@ -425,6 +442,7 @@ ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v12 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bswap_v8i32: @@ -456,6 +474,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vse32.v v9, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v8, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bswap_v8i32: @@ -487,6 +506,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vse32.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -549,6 +569,7 @@ ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v12 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: 
bswap_v4i64: @@ -587,6 +608,7 @@ ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v12 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: bswap_v4i64: @@ -665,6 +687,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: bswap_v4i64: @@ -727,6 +750,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vse64.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v8, (t1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %p ret <4 x i8> %v @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %p ret <4 x i32> %v @@ -27,6 +29,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %p ret <8 x i32> %v @@ -37,6 +40,7 @@ ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; LMULMAX8-NEXT: vle64.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v16i64: @@ -45,6 +49,7 @@ ; LMULMAX4-NEXT: vle64.v v8, (a0) ; LMULMAX4-NEXT: addi a0, a0, 64 ; LMULMAX4-NEXT: vle64.v v12, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %v = load <16 x i64>, <16 x i64>* %p ret <16 x i64> %v @@ -55,6 +60,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i1>, <8 x i1>* %p ret <8 x i1> %v @@ -66,6 +72,7 @@ ; CHECK-NEXT: addi a1, zero, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i1>, <32 x i1>* %p ret <32 x i1> %v @@ -80,6 +87,7 @@ ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: addi a0, a0, 128 ; LMULMAX8-NEXT: vle32.v v16, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_split_v64i32: @@ -92,6 +100,7 @@ ; LMULMAX4-NEXT: vle32.v v16, (a1) ; LMULMAX4-NEXT: addi a0, a0, 192 ; LMULMAX4-NEXT: vle32.v v20, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x ret <64 x i32> %v @@ -117,6 +126,7 @@ ; LMULMAX8-NEXT: addi a1, a0, 128 ; LMULMAX8-NEXT: vse32.v v16, (a1) ; LMULMAX8-NEXT: vse32.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_split_v128i32: @@ -152,6 +162,7 @@ ; LMULMAX4-NEXT: addi a1, a0, 64 ; LMULMAX4-NEXT: vse32.v v12, (a1) ; LMULMAX4-NEXT: vse32.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %v = load <128 x i32>, <128 x i32>* %x ret <128 
x i32> %v @@ -162,6 +173,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = add <4 x i8> %v, ret <4 x i8> %r @@ -172,6 +184,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = add <4 x i8> %v, %w ret <4 x i8> %r @@ -182,6 +195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = add <4 x i64> %v, %w ret <4 x i64> %r @@ -192,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = xor <8 x i1> %v, %w ret <8 x i1> %r @@ -203,6 +218,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = and <32 x i1> %v, %w ret <32 x i1> %r @@ -217,6 +233,7 @@ ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-NEXT: vadd.vv v8, v8, v24 ; LMULMAX8-NEXT: vadd.vx v8, v8, a1 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: @@ -231,6 +248,7 @@ ; LMULMAX4-NEXT: vadd.vv v8, v8, v28 ; LMULMAX4-NEXT: vadd.vx v8, v8, a2 ; LMULMAX4-NEXT: vadd.vx v12, v12, a2 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %r = add <32 x i32> %x, %y %s = add <32 x i32> %r, %z @@ -256,7 +274,9 @@ ; LMULMAX8-NEXT: vmv8r.v v16, v24 ; LMULMAX8-NEXT: call ext2@plt ; LMULMAX8-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_restore ra ; LMULMAX8-NEXT: addi sp, sp, 16 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_i32: @@ -274,7 +294,9 @@ ; LMULMAX4-NEXT: vmv4r.v v20, v24 ; LMULMAX4-NEXT: call ext2@plt ; LMULMAX4-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_restore ra ; LMULMAX4-NEXT: addi sp, sp, 16 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %t = call fastcc <32 x i32> @ext2(<32 x i32> %y, <32 x i32> %x, i32 %w, i32 2) ret <32 x i32> %t @@ -303,8 +325,12 @@ ; LMULMAX8-NEXT: call ext3@plt ; LMULMAX8-NEXT: addi sp, s0, -384 ; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_def_cfa sp, 384 ; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_restore ra +; LMULMAX8-NEXT: .cfi_restore s0 ; LMULMAX8-NEXT: addi sp, sp, 384 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32: @@ -333,8 +359,12 @@ ; LMULMAX4-NEXT: call ext3@plt ; LMULMAX4-NEXT: addi sp, s0, -384 ; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_def_cfa sp, 384 ; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_restore ra +; LMULMAX4-NEXT: .cfi_restore s0 ; LMULMAX4-NEXT: addi sp, sp, 384 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %t = call fastcc <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42) ret <32 x i32> %t @@ -350,6 +380,7 @@ ; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v16, (t2) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: vector_arg_indirect_stack: @@ -360,6 +391,7 @@ ; LMULMAX4-NEXT: vle32.v 
v20, (a0) ; LMULMAX4-NEXT: vadd.vv v8, v8, v16 ; LMULMAX4-NEXT: vadd.vv v12, v12, v20 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %s = add <32 x i32> %x, %z ret <32 x i32> %s @@ -397,8 +429,12 @@ ; LMULMAX8-NEXT: call vector_arg_indirect_stack@plt ; LMULMAX8-NEXT: addi sp, s0, -384 ; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_def_cfa sp, 384 ; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_restore ra +; LMULMAX8-NEXT: .cfi_restore s0 ; LMULMAX8-NEXT: addi sp, sp, 384 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: pass_vector_arg_indirect_stack: @@ -434,8 +470,12 @@ ; LMULMAX4-NEXT: call vector_arg_indirect_stack@plt ; LMULMAX4-NEXT: addi sp, s0, -384 ; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_def_cfa sp, 384 ; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_restore ra +; LMULMAX4-NEXT: .cfi_restore s0 ; LMULMAX4-NEXT: addi sp, sp, 384 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %s = call fastcc <32 x i32> @vector_arg_indirect_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8) ret <32 x i32> %s @@ -454,6 +494,7 @@ ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-NEXT: vadd.vv v8, v8, v24 ; LMULMAX8-NEXT: addi sp, sp, 16 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: vector_arg_direct_stack: @@ -470,6 +511,7 @@ ; LMULMAX4-NEXT: vadd.vv v8, v8, v24 ; LMULMAX4-NEXT: vadd.vv v12, v12, v28 ; LMULMAX4-NEXT: addi sp, sp, 16 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %s = add <32 x i32> %x, %y %t = add <32 x i32> %s, %z @@ -509,7 +551,9 @@ ; LMULMAX8-NEXT: vmv8r.v v16, v8 ; LMULMAX8-NEXT: call vector_arg_direct_stack@plt ; LMULMAX8-NEXT: ld ra, 152(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_restore ra ; LMULMAX8-NEXT: addi sp, sp, 160 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: pass_vector_arg_direct_stack: @@ -546,7 +590,9 @@ ; LMULMAX4-NEXT: vmv4r.v v20, v8 ; LMULMAX4-NEXT: call vector_arg_direct_stack@plt ; LMULMAX4-NEXT: ld ra, 152(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_restore ra ; LMULMAX4-NEXT: addi sp, sp, 160 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %s = call fastcc <32 x i32> @vector_arg_direct_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 1) ret <32 x i32> %s @@ -564,6 +610,7 @@ ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = xor <4 x i1> %m1, %m2 ret <4 x i1> %r diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %p ret <4 x i8> %v @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x 
ret <4 x i32> %v
@@ -29,18 +31,21 @@
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX8-NEXT: vle32.v v8, (a0)
+; LMULMAX8-NEXT: .cfi_def_cfa_offset 0
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: ret_v8i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX4-NEXT: vle32.v v8, (a0)
+; LMULMAX4-NEXT: .cfi_def_cfa_offset 0
; LMULMAX4-NEXT: ret
;
; LMULMAX2-LABEL: ret_v8i32:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vle32.v v8, (a0)
+; LMULMAX2-NEXT: .cfi_def_cfa_offset 0
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: ret_v8i32:
@@ -49,6 +54,7 @@
; LMULMAX1-NEXT: vle32.v v8, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v9, (a0)
+; LMULMAX1-NEXT: .cfi_def_cfa_offset 0
; LMULMAX1-NEXT: ret
%v = load <8 x i32>, <8 x i32>* %p
ret <8 x i32> %v
@@ -59,6 +65,7 @@
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; LMULMAX8-NEXT: vle64.v v8, (a0)
+; LMULMAX8-NEXT: .cfi_def_cfa_offset 0
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: ret_v16i64:
@@ -67,6 +74,7 @@
; LMULMAX4-NEXT: vle64.v v8, (a0)
; LMULMAX4-NEXT: addi a0, a0, 64
; LMULMAX4-NEXT: vle64.v v12, (a0)
+; LMULMAX4-NEXT: .cfi_def_cfa_offset 0
; LMULMAX4-NEXT: ret
;
; LMULMAX2-LABEL: ret_v16i64:
@@ -79,6 +87,7 @@
; LMULMAX2-NEXT: vle64.v v12, (a1)
; LMULMAX2-NEXT: addi a0, a0, 96
; LMULMAX2-NEXT: vle64.v v14, (a0)
+; LMULMAX2-NEXT: .cfi_def_cfa_offset 0
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: ret_v16i64:
@@ -99,6 +108,7 @@
; LMULMAX1-NEXT: vle64.v v14, (a1)
; LMULMAX1-NEXT: addi a0, a0, 112
; LMULMAX1-NEXT: vle64.v v15, (a0)
+; LMULMAX1-NEXT: .cfi_def_cfa_offset 0
; LMULMAX1-NEXT: ret
%v = load <16 x i64>, <16 x i64>* %p
ret <16 x i64> %v
@@ -109,6 +119,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = load <8 x i1>, <8 x i1>* %p
ret <8 x i1> %v
@@ -120,6 +131,7 @@
; LMULMAX8-NEXT: addi a1, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; LMULMAX8-NEXT: vlm.v v0, (a0)
+; LMULMAX8-NEXT: .cfi_def_cfa_offset 0
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: ret_mask_v32i1:
@@ -127,6 +139,7 @@
; LMULMAX4-NEXT: addi a1, zero, 32
; LMULMAX4-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; LMULMAX4-NEXT: vlm.v v0, (a0)
+; LMULMAX4-NEXT: .cfi_def_cfa_offset 0
; LMULMAX4-NEXT: ret
;
; LMULMAX2-LABEL: ret_mask_v32i1:
@@ -134,6 +147,7 @@
; LMULMAX2-NEXT: addi a1, zero, 32
; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; LMULMAX2-NEXT: vlm.v v0, (a0)
+; LMULMAX2-NEXT: .cfi_def_cfa_offset 0
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: ret_mask_v32i1:
@@ -142,6 +156,7 @@
; LMULMAX1-NEXT: vlm.v v0, (a0)
; LMULMAX1-NEXT: addi a0, a0, 2
; LMULMAX1-NEXT: vlm.v v8, (a0)
+; LMULMAX1-NEXT: .cfi_def_cfa_offset 0
; LMULMAX1-NEXT: ret
%v = load <32 x i1>, <32 x i1>* %p
ret <32 x i1> %v
@@ -156,6 +171,7 @@
; LMULMAX8-NEXT: vle32.v v8, (a0)
; LMULMAX8-NEXT: addi a0, a0, 128
; LMULMAX8-NEXT: vle32.v v16, (a0)
+; LMULMAX8-NEXT: .cfi_def_cfa_offset 0
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: ret_split_v64i32:
@@ -168,6 +184,7 @@
; LMULMAX4-NEXT: vle32.v v16, (a1)
; LMULMAX4-NEXT: addi a0, a0, 192
; LMULMAX4-NEXT: vle32.v v20, (a0)
+; LMULMAX4-NEXT: .cfi_def_cfa_offset 0
; LMULMAX4-NEXT: ret
;
; LMULMAX2-LABEL: ret_split_v64i32:
@@ -188,6 +205,7 @@
; LMULMAX2-NEXT: vle32.v v20, (a1)
; LMULMAX2-NEXT: addi a0, a0, 224
; LMULMAX2-NEXT: vle32.v v22, (a0)
+; LMULMAX2-NEXT: .cfi_def_cfa_offset 0
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: ret_split_v64i32:
@@ -224,6 +242,7 @@
; LMULMAX1-NEXT: vle32.v v22, (a1)
; LMULMAX1-NEXT: addi a0, a0, 240
; LMULMAX1-NEXT: vle32.v v23, (a0)
+; LMULMAX1-NEXT: .cfi_def_cfa_offset 0
; LMULMAX1-NEXT: ret
%v = load <64 x i32>, <64 x i32>* %x
ret <64 x i32> %v
@@ -249,6 +268,7 @@
; LMULMAX8-NEXT: addi a1, a0, 128
; LMULMAX8-NEXT: vse32.v v16, (a1)
; LMULMAX8-NEXT: vse32.v v8, (a0)
+; LMULMAX8-NEXT: .cfi_def_cfa_offset 0
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: ret_split_v128i32:
@@ -284,6 +304,7 @@
; LMULMAX4-NEXT: addi a1, a0, 64
; LMULMAX4-NEXT: vse32.v v12, (a1)
; LMULMAX4-NEXT: vse32.v v8, (a0)
+; LMULMAX4-NEXT: .cfi_def_cfa_offset 0
; LMULMAX4-NEXT: ret
;
; LMULMAX2-LABEL: ret_split_v128i32:
@@ -351,6 +372,7 @@
; LMULMAX2-NEXT: addi a1, a0, 32
; LMULMAX2-NEXT: vse32.v v10, (a1)
; LMULMAX2-NEXT: vse32.v v8, (a0)
+; LMULMAX2-NEXT: .cfi_def_cfa_offset 0
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: ret_split_v128i32:
@@ -482,6 +504,7 @@
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v9, (a1)
; LMULMAX1-NEXT: vse32.v v8, (a0)
+; LMULMAX1-NEXT: .cfi_def_cfa_offset 0
; LMULMAX1-NEXT: ret
%v = load <128 x i32>, <128 x i32>* %x
ret <128 x i32> %v
@@ -492,6 +515,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vadd.vi v8, v8, 2
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%r = add <4 x i8> %v,
ret <4 x i8> %r
@@ -502,6 +526,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%r = add <4 x i8> %v, %w
ret <4 x i8> %r
@@ -512,18 +537,21 @@
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX8-NEXT: vadd.vv v8, v8, v10
+; LMULMAX8-NEXT: .cfi_def_cfa_offset 0
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: ret_v4i64_param_v4i64_v4i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX4-NEXT: vadd.vv v8, v8, v10
+; LMULMAX4-NEXT: .cfi_def_cfa_offset 0
; LMULMAX4-NEXT: ret
;
; LMULMAX2-LABEL: ret_v4i64_param_v4i64_v4i64:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-NEXT: vadd.vv v8, v8, v10
+; LMULMAX2-NEXT: .cfi_def_cfa_offset 0
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: ret_v4i64_param_v4i64_v4i64:
@@ -531,6 +559,7 @@
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-NEXT: vadd.vv v8, v8, v10
; LMULMAX1-NEXT: vadd.vv v9, v9, v11
+; LMULMAX1-NEXT: .cfi_def_cfa_offset 0
; LMULMAX1-NEXT: ret
%r = add <4 x i64> %v, %w
ret <4 x i64> %r
@@ -541,6 +570,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmxor.mm v0, v0, v8
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%r = xor <8 x i1> %v, %w
ret <8 x i1> %r
@@ -552,6 +582,7 @@
; LMULMAX8-NEXT: addi a0, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; LMULMAX8-NEXT: vmand.mm v0, v0, v8
+; LMULMAX8-NEXT: .cfi_def_cfa_offset 0
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: ret_v32i1_param_v32i1_v32i1:
@@ -559,6 +590,7 @@
; LMULMAX4-NEXT: addi a0, zero, 32
; LMULMAX4-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; LMULMAX4-NEXT: vmand.mm v0, v0, v8
+; LMULMAX4-NEXT: .cfi_def_cfa_offset 0
; LMULMAX4-NEXT: ret
;
; LMULMAX2-LABEL: ret_v32i1_param_v32i1_v32i1:
@@ -566,6 +598,7 @@
; LMULMAX2-NEXT: addi a0, zero, 32
; LMULMAX2-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; LMULMAX2-NEXT: vmand.mm v0, v0, v8
+; LMULMAX2-NEXT: .cfi_def_cfa_offset 0
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: ret_v32i1_param_v32i1_v32i1:
@@ -573,6 +606,7 @@
; LMULMAX1-NEXT: vsetivli
zero, 16, e8, m1, ta, mu ; LMULMAX1-NEXT: vmand.mm v0, v0, v9 ; LMULMAX1-NEXT: vmand.mm v8, v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %r = and <32 x i1> %v, %w ret <32 x i1> %r @@ -587,6 +621,7 @@ ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-NEXT: vadd.vv v8, v8, v24 ; LMULMAX8-NEXT: vadd.vx v8, v8, a1 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: @@ -601,6 +636,7 @@ ; LMULMAX4-NEXT: vadd.vv v8, v8, v28 ; LMULMAX4-NEXT: vadd.vx v8, v8, a2 ; LMULMAX4-NEXT: vadd.vx v12, v12, a2 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: @@ -625,6 +661,7 @@ ; LMULMAX2-NEXT: vadd.vx v10, v10, a4 ; LMULMAX2-NEXT: vadd.vx v12, v12, a4 ; LMULMAX2-NEXT: vadd.vx v14, v14, a4 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: @@ -670,6 +707,7 @@ ; LMULMAX1-NEXT: vadd.vx v13, v13, a0 ; LMULMAX1-NEXT: vadd.vx v14, v14, a0 ; LMULMAX1-NEXT: vadd.vx v15, v15, a0 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %r = add <32 x i32> %x, %y %s = add <32 x i32> %r, %z @@ -695,7 +733,9 @@ ; LMULMAX8-NEXT: vmv8r.v v16, v24 ; LMULMAX8-NEXT: call ext2@plt ; LMULMAX8-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_restore ra ; LMULMAX8-NEXT: addi sp, sp, 16 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_i32: @@ -713,7 +753,9 @@ ; LMULMAX4-NEXT: vmv4r.v v20, v24 ; LMULMAX4-NEXT: call ext2@plt ; LMULMAX4-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_restore ra ; LMULMAX4-NEXT: addi sp, sp, 16 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: ret_v32i32_call_v32i32_v32i32_i32: @@ -737,7 +779,9 @@ ; LMULMAX2-NEXT: vmv2r.v v22, v24 ; LMULMAX2-NEXT: call ext2@plt ; LMULMAX2-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; LMULMAX2-NEXT: .cfi_restore ra ; LMULMAX2-NEXT: addi sp, sp, 16 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: ret_v32i32_call_v32i32_v32i32_i32: @@ -773,7 +817,9 @@ ; LMULMAX1-NEXT: vmv1r.v v23, v24 ; LMULMAX1-NEXT: call ext2@plt ; LMULMAX1-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: .cfi_restore ra ; LMULMAX1-NEXT: addi sp, sp, 16 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %t = call <32 x i32> @ext2(<32 x i32> %y, <32 x i32> %x, i32 %w, i32 2) ret <32 x i32> %t @@ -802,8 +848,12 @@ ; LMULMAX8-NEXT: call ext3@plt ; LMULMAX8-NEXT: addi sp, s0, -384 ; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_def_cfa sp, 384 ; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_restore ra +; LMULMAX8-NEXT: .cfi_restore s0 ; LMULMAX8-NEXT: addi sp, sp, 384 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32: @@ -832,8 +882,12 @@ ; LMULMAX4-NEXT: call ext3@plt ; LMULMAX4-NEXT: addi sp, s0, -384 ; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_def_cfa sp, 384 ; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_restore ra +; LMULMAX4-NEXT: .cfi_restore s0 ; LMULMAX4-NEXT: addi sp, sp, 384 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32: @@ -872,8 +926,12 @@ ; LMULMAX2-NEXT: call ext3@plt ; LMULMAX2-NEXT: addi sp, s0, -384 ; 
LMULMAX2-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; LMULMAX2-NEXT: .cfi_def_cfa sp, 384 ; LMULMAX2-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; LMULMAX2-NEXT: .cfi_restore ra +; LMULMAX2-NEXT: .cfi_restore s0 ; LMULMAX2-NEXT: addi sp, sp, 384 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32: @@ -935,8 +993,12 @@ ; LMULMAX1-NEXT: call ext3@plt ; LMULMAX1-NEXT: addi sp, s0, -384 ; LMULMAX1-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: .cfi_def_cfa sp, 384 ; LMULMAX1-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: .cfi_restore ra +; LMULMAX1-NEXT: .cfi_restore s0 ; LMULMAX1-NEXT: addi sp, sp, 384 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %t = call <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42) ret <32 x i32> %t @@ -962,6 +1024,7 @@ ; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vadd.vv v8, v16, v8 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: split_vector_args: @@ -972,6 +1035,7 @@ ; LMULMAX4-NEXT: vle32.v v12, (a1) ; LMULMAX4-NEXT: vadd.vv v8, v16, v8 ; LMULMAX4-NEXT: vadd.vv v12, v20, v12 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: split_vector_args: @@ -986,6 +1050,7 @@ ; LMULMAX2-NEXT: vadd.vv v10, v16, v10 ; LMULMAX2-NEXT: vadd.vv v12, v18, v12 ; LMULMAX2-NEXT: vadd.vv v14, v20, v24 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: split_vector_args: @@ -1008,6 +1073,7 @@ ; LMULMAX1-NEXT: vadd.vv v13, v18, v26 ; LMULMAX1-NEXT: vadd.vv v14, v19, v25 ; LMULMAX1-NEXT: vadd.vv v15, v20, v24 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %v0 = add <32 x i32> %y, %z ret <32 x i32> %v0 @@ -1040,8 +1106,12 @@ ; LMULMAX8-NEXT: call split_vector_args@plt ; LMULMAX8-NEXT: addi sp, s0, -384 ; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_def_cfa sp, 384 ; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_restore ra +; LMULMAX8-NEXT: .cfi_restore s0 ; LMULMAX8-NEXT: addi sp, sp, 384 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: call_split_vector_args: @@ -1073,8 +1143,12 @@ ; LMULMAX4-NEXT: call split_vector_args@plt ; LMULMAX4-NEXT: addi sp, s0, -384 ; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_def_cfa sp, 384 ; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_restore ra +; LMULMAX4-NEXT: .cfi_restore s0 ; LMULMAX4-NEXT: addi sp, sp, 384 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: call_split_vector_args: @@ -1113,8 +1187,12 @@ ; LMULMAX2-NEXT: call split_vector_args@plt ; LMULMAX2-NEXT: addi sp, s0, -256 ; LMULMAX2-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX2-NEXT: .cfi_def_cfa sp, 256 ; LMULMAX2-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX2-NEXT: .cfi_restore ra +; LMULMAX2-NEXT: .cfi_restore s0 ; LMULMAX2-NEXT: addi sp, sp, 256 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: call_split_vector_args: @@ -1167,8 +1245,12 @@ ; LMULMAX1-NEXT: call split_vector_args@plt ; LMULMAX1-NEXT: addi sp, s0, -256 ; LMULMAX1-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: .cfi_def_cfa sp, 256 ; LMULMAX1-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: .cfi_restore ra +; LMULMAX1-NEXT: .cfi_restore s0 ; LMULMAX1-NEXT: addi 
sp, sp, 256 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <2 x i32>, <2 x i32>* %pa %b = load <32 x i32>, <32 x i32>* %pb @@ -1189,6 +1271,7 @@ ; LMULMAX8-NEXT: vle32.v v16, (a0) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-NEXT: addi sp, sp, 16 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: vector_arg_via_stack: @@ -1203,6 +1286,7 @@ ; LMULMAX4-NEXT: vadd.vv v8, v8, v16 ; LMULMAX4-NEXT: vadd.vv v12, v12, v20 ; LMULMAX4-NEXT: addi sp, sp, 16 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: vector_arg_via_stack: @@ -1223,6 +1307,7 @@ ; LMULMAX2-NEXT: vadd.vv v12, v12, v20 ; LMULMAX2-NEXT: vadd.vv v14, v14, v22 ; LMULMAX2-NEXT: addi sp, sp, 16 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: vector_arg_via_stack: @@ -1255,6 +1340,7 @@ ; LMULMAX1-NEXT: vadd.vv v14, v14, v17 ; LMULMAX1-NEXT: vadd.vv v15, v15, v16 ; LMULMAX1-NEXT: addi sp, sp, 16 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %s = add <32 x i32> %x, %z ret <32 x i32> %s @@ -1285,7 +1371,9 @@ ; LMULMAX8-NEXT: vmv8r.v v16, v8 ; LMULMAX8-NEXT: call vector_arg_via_stack@plt ; LMULMAX8-NEXT: ld ra, 136(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_restore ra ; LMULMAX8-NEXT: addi sp, sp, 144 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: pass_vector_arg_via_stack: @@ -1314,7 +1402,9 @@ ; LMULMAX4-NEXT: vmv4r.v v20, v8 ; LMULMAX4-NEXT: call vector_arg_via_stack@plt ; LMULMAX4-NEXT: ld ra, 136(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_restore ra ; LMULMAX4-NEXT: addi sp, sp, 144 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: pass_vector_arg_via_stack: @@ -1351,7 +1441,9 @@ ; LMULMAX2-NEXT: vmv2r.v v22, v8 ; LMULMAX2-NEXT: call vector_arg_via_stack@plt ; LMULMAX2-NEXT: ld ra, 136(sp) # 8-byte Folded Reload +; LMULMAX2-NEXT: .cfi_restore ra ; LMULMAX2-NEXT: addi sp, sp, 144 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: pass_vector_arg_via_stack: @@ -1404,7 +1496,9 @@ ; LMULMAX1-NEXT: vmv1r.v v23, v8 ; LMULMAX1-NEXT: call vector_arg_via_stack@plt ; LMULMAX1-NEXT: ld ra, 136(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: .cfi_restore ra ; LMULMAX1-NEXT: addi sp, sp, 144 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %s = call <32 x i32> @vector_arg_via_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8) ret <32 x i32> %s @@ -1421,6 +1515,7 @@ ; CHECK-NEXT: addi a0, sp, 152 ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ret <4 x i1> %10 } @@ -1462,7 +1557,9 @@ ; LMULMAX8-NEXT: vmv8r.v v16, v8 ; LMULMAX8-NEXT: call vector_mask_arg_via_stack@plt ; LMULMAX8-NEXT: ld ra, 152(sp) # 8-byte Folded Reload +; LMULMAX8-NEXT: .cfi_restore ra ; LMULMAX8-NEXT: addi sp, sp, 160 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: pass_vector_mask_arg_via_stack: @@ -1502,7 +1599,9 @@ ; LMULMAX4-NEXT: vmv4r.v v20, v8 ; LMULMAX4-NEXT: call vector_mask_arg_via_stack@plt ; LMULMAX4-NEXT: ld ra, 152(sp) # 8-byte Folded Reload +; LMULMAX4-NEXT: .cfi_restore ra ; LMULMAX4-NEXT: addi sp, sp, 160 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: pass_vector_mask_arg_via_stack: @@ -1550,7 +1649,9 @@ ; LMULMAX2-NEXT: vmv2r.v v22, v8 ; LMULMAX2-NEXT: call 
vector_mask_arg_via_stack@plt ; LMULMAX2-NEXT: ld ra, 152(sp) # 8-byte Folded Reload +; LMULMAX2-NEXT: .cfi_restore ra ; LMULMAX2-NEXT: addi sp, sp, 160 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: pass_vector_mask_arg_via_stack: @@ -1614,7 +1715,9 @@ ; LMULMAX1-NEXT: vmv1r.v v23, v8 ; LMULMAX1-NEXT: call vector_mask_arg_via_stack@plt ; LMULMAX1-NEXT: ld ra, 152(sp) # 8-byte Folded Reload +; LMULMAX1-NEXT: .cfi_restore ra ; LMULMAX1-NEXT: addi sp, sp, 160 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %r = call <4 x i1> @vector_mask_arg_via_stack(i32 0, i32 0, i32 0, i32 0, i32 0, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8, <4 x i1> %v, <4 x i1> %v) ret <4 x i1> %r diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll @@ -22,6 +22,7 @@ ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -56,6 +57,7 @@ ; LMULMAX2-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: ctpop_v8i16: @@ -82,6 +84,7 @@ ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: ctpop_v8i16: @@ -108,6 +111,7 @@ ; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: ctpop_v8i16: @@ -134,6 +138,7 @@ ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -169,6 +174,7 @@ ; LMULMAX2-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 24 ; LMULMAX2-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: ctpop_v4i32: @@ -196,6 +202,7 @@ ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 24 ; LMULMAX2-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: ctpop_v4i32: @@ -223,6 +230,7 @@ ; LMULMAX1-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 24 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: ctpop_v4i32: @@ -250,6 +258,7 @@ ; LMULMAX1-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX1-RV64-NEXT: vsrl.vi v8, v8, 24 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -298,6 +307,7 @@ ; LMULMAX2-RV32-NEXT: addi a1, zero, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: 
ctpop_v2i64: @@ -348,6 +358,7 @@ ; LMULMAX2-RV64-NEXT: addi a1, zero, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: ctpop_v2i64: @@ -388,6 +399,7 @@ ; LMULMAX1-RV32-NEXT: addi a1, zero, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: ctpop_v2i64: @@ -438,6 +450,7 @@ ; LMULMAX1-RV64-NEXT: addi a1, zero, 56 ; LMULMAX1-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -466,6 +479,7 @@ ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-NEXT: vand.vi v8, v8, 15 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: ctpop_v32i8: @@ -498,6 +512,7 @@ ; LMULMAX1-NEXT: vand.vi v9, v9, 15 ; LMULMAX1-NEXT: vse8.v v9, (a0) ; LMULMAX1-NEXT: vse8.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -532,6 +547,7 @@ ; LMULMAX2-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: ctpop_v16i16: @@ -558,6 +574,7 @@ ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 8 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: ctpop_v16i16: @@ -599,6 +616,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v9, 8 ; LMULMAX1-RV32-NEXT: vse16.v v9, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v8, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: ctpop_v16i16: @@ -640,6 +658,7 @@ ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v9, 8 ; LMULMAX1-RV64-NEXT: vse16.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -675,6 +694,7 @@ ; LMULMAX2-RV32-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 24 ; LMULMAX2-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: ctpop_v8i32: @@ -702,6 +722,7 @@ ; LMULMAX2-RV64-NEXT: vmul.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vsrl.vi v8, v8, 24 ; LMULMAX2-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: ctpop_v8i32: @@ -744,6 +765,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v9, 24 ; LMULMAX1-RV32-NEXT: vse32.v v9, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v8, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: ctpop_v8i32: @@ -786,6 +808,7 @@ ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v9, 24 ; LMULMAX1-RV64-NEXT: vse32.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -834,6 +857,7 @@ ; LMULMAX2-RV32-NEXT: addi a1, zero, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; 
LMULMAX2-RV64-LABEL: ctpop_v4i64: @@ -884,6 +908,7 @@ ; LMULMAX2-RV64-NEXT: addi a1, zero, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v8, v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: ctpop_v4i64: @@ -939,6 +964,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vx v9, v9, a2 ; LMULMAX1-RV32-NEXT: vse64.v v9, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v8, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: ctpop_v4i64: @@ -1004,6 +1030,7 @@ ; LMULMAX1-RV64-NEXT: vsrl.vx v9, v9, a1 ; LMULMAX1-RV64-NEXT: vse64.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v8, (a6) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -46,6 +47,7 @@ ; RV32-NEXT: sw a5, 0(a0) ; RV32-NEXT: sw a1, 12(a0) ; RV32-NEXT: sw a3, 4(a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: add_v2i64: @@ -58,6 +60,7 @@ ; RV64-NEXT: add a1, a2, a1 ; RV64-NEXT: sd a1, 8(a0) ; RV64-NEXT: sd a3, 0(a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -75,6 +78,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = load <2 x i32>, <2 x i32>* %y @@ -97,6 +101,7 @@ ; RV32-NEXT: add a2, a3, a2 ; RV32-NEXT: sw a1, 0(a0) ; RV32-NEXT: sw a2, 4(a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: add_v1i64: @@ -105,6 +110,7 @@ ; RV64-NEXT: ld a1, 0(a1) ; RV64-NEXT: add a1, a2, a1 ; RV64-NEXT: sd a1, 0(a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <1 x i64>, <1 x i64>* %x %b = load <1 x i64>, <1 x i64>* %y @@ -122,6 +128,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -142,6 +149,7 @@ ; CHECK-NEXT: fadd.d ft0, ft0, ft3 ; CHECK-NEXT: fsd ft0, 8(a0) ; CHECK-NEXT: fsd ft1, 0(a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -159,6 +167,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x float>, <2 x float>* %x %b = load <2 x float>, <2 x float>* %y @@ -175,6 +184,7 @@ ; CHECK-NEXT: fld ft1, 0(a1) ; CHECK-NEXT: fadd.d ft0, ft0, ft1 ; CHECK-NEXT: fsd ft0, 0(a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <1 x double>, <1 x double>* %x %b = load <1 x double>, <1 x double>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i1>, <2 x i1>* %x %z = sext <2 x i1> %y to <2 x i16> @@ -25,6 +26,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i8>, <2 x i8>* %x %z = sext <2 x i8> %y to <2 x i16> @@ -38,6 +40,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i8>, <2 x i8>* %x %z = zext <2 x i8> %y to <2 x i16> @@ -51,6 +54,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i8>, <2 x i8>* %x %z = sext <2 x i8> %y to <2 x i32> @@ -64,6 +68,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i8>, <2 x i8>* %x %z = zext <2 x i8> %y to <2 x i32> @@ -77,6 +82,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf8 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i8>, <2 x i8>* %x %z = sext <2 x i8> %y to <2 x i64> @@ -90,6 +96,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf8 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i8>, <2 x i8>* %x %z = zext <2 x i8> %y to <2 x i64> @@ -103,6 +110,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <4 x i8>, <4 x i8>* %x %z = sext <4 x i8> %y to <4 x i16> @@ -116,6 +124,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <4 x i8>, <4 x i8>* %x %z = zext <4 x i8> %y to <4 x i16> @@ -129,6 +138,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <4 x i8>, <4 x i8>* %x %z = sext <4 x i8> %y to <4 x i32> @@ -142,6 +152,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <4 x i8>, <4 x i8>* %x %z = zext <4 x i8> %y to <4 x i32> @@ -158,6 +169,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vsext.vf8 v9, v8 ; LMULMAX1-NEXT: vsext.vf8 v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v4i8_v4i64: @@ -166,6 +178,7 @@ ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; LMULMAX4-NEXT: vsext.vf8 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <4 x i8>, <4 x i8>* %x %z = sext <4 x i8> %y to <4 x i64> @@ -182,6 +195,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vzext.vf8 v9, v8 ; LMULMAX1-NEXT: vzext.vf8 v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; 
LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v4i8_v4i64: @@ -190,6 +204,7 @@ ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; LMULMAX4-NEXT: vzext.vf8 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <4 x i8>, <4 x i8>* %x %z = zext <4 x i8> %y to <4 x i64> @@ -203,6 +218,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <8 x i8>, <8 x i8>* %x %z = sext <8 x i8> %y to <8 x i16> @@ -216,6 +232,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <8 x i8>, <8 x i8>* %x %z = zext <8 x i8> %y to <8 x i16> @@ -232,6 +249,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vsext.vf4 v9, v8 ; LMULMAX1-NEXT: vsext.vf4 v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v8i8_v8i32: @@ -240,6 +258,7 @@ ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; LMULMAX4-NEXT: vsext.vf4 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <8 x i8>, <8 x i8>* %x %z = sext <8 x i8> %y to <8 x i32> @@ -256,6 +275,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vzext.vf4 v9, v8 ; LMULMAX1-NEXT: vzext.vf4 v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v8i8_v8i32: @@ -264,6 +284,7 @@ ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; LMULMAX4-NEXT: vzext.vf4 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <8 x i8>, <8 x i8>* %x %z = zext <8 x i8> %y to <8 x i32> @@ -288,6 +309,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vsext.vf8 v11, v8 ; LMULMAX1-NEXT: vsext.vf8 v8, v12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v8i8_v8i64: @@ -296,6 +318,7 @@ ; LMULMAX4-NEXT: vle8.v v12, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; LMULMAX4-NEXT: vsext.vf8 v8, v12 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <8 x i8>, <8 x i8>* %x %z = sext <8 x i8> %y to <8 x i64> @@ -320,6 +343,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vzext.vf8 v11, v8 ; LMULMAX1-NEXT: vzext.vf8 v8, v12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v8i8_v8i64: @@ -328,6 +352,7 @@ ; LMULMAX4-NEXT: vle8.v v12, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; LMULMAX4-NEXT: vzext.vf8 v8, v12 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <8 x i8>, <8 x i8>* %x %z = zext <8 x i8> %y to <8 x i64> @@ -344,6 +369,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vsext.vf2 v9, v8 ; LMULMAX1-NEXT: vsext.vf2 v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v16i8_v16i16: @@ -352,6 +378,7 @@ ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; LMULMAX4-NEXT: vsext.vf2 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i8>, <16 x i8>* %x %z = sext <16 x i8> %y to <16 x i16> @@ -368,6 +395,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vzext.vf2 v9, v8 ; LMULMAX1-NEXT: vzext.vf2 v8, v10 +; 
LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v16i8_v16i16: @@ -376,6 +404,7 @@ ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; LMULMAX4-NEXT: vzext.vf2 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i8>, <16 x i8>* %x %z = zext <16 x i8> %y to <16 x i16> @@ -400,6 +429,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vsext.vf4 v11, v8 ; LMULMAX1-NEXT: vsext.vf4 v8, v12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v16i8_v16i32: @@ -408,6 +438,7 @@ ; LMULMAX4-NEXT: vle8.v v12, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; LMULMAX4-NEXT: vsext.vf4 v8, v12 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i8>, <16 x i8>* %x %z = sext <16 x i8> %y to <16 x i32> @@ -432,6 +463,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vzext.vf4 v11, v8 ; LMULMAX1-NEXT: vzext.vf4 v8, v12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v16i8_v16i32: @@ -440,6 +472,7 @@ ; LMULMAX4-NEXT: vle8.v v12, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; LMULMAX4-NEXT: vzext.vf4 v8, v12 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i8>, <16 x i8>* %x %z = zext <16 x i8> %y to <16 x i32> @@ -480,6 +513,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vsext.vf8 v15, v8 ; LMULMAX1-NEXT: vsext.vf8 v8, v16 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v16i8_v16i64: @@ -491,6 +525,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; LMULMAX4-NEXT: vsext.vf8 v12, v8 ; LMULMAX4-NEXT: vsext.vf8 v8, v16 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i8>, <16 x i8>* %x %z = sext <16 x i8> %y to <16 x i64> @@ -531,6 +566,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vzext.vf8 v15, v8 ; LMULMAX1-NEXT: vzext.vf8 v8, v16 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v16i8_v16i64: @@ -542,6 +578,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; LMULMAX4-NEXT: vzext.vf8 v12, v8 ; LMULMAX4-NEXT: vzext.vf8 v8, v16 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i8>, <16 x i8>* %x %z = zext <16 x i8> %y to <16 x i64> @@ -563,6 +600,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc <2 x i8> %x to <2 x i1> store <2 x i1> %y, <2 x i1>* %z @@ -575,6 +613,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc <2 x i16> %x to <2 x i8> store <2 x i8> %y, <2 x i8>* %z @@ -588,6 +627,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i16>, <2 x i16>* %x %z = sext <2 x i16> %y to <2 x i32> @@ -601,6 +641,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i16>, <2 x i16>* %x %z = zext <2 x i16> %y to <2 x i32> @@ -614,6 +655,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli 
zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i16>, <2 x i16>* %x %z = sext <2 x i16> %y to <2 x i64> @@ -627,6 +669,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i16>, <2 x i16>* %x %z = zext <2 x i16> %y to <2 x i64> @@ -639,6 +682,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc <4 x i16> %x to <4 x i8> store <4 x i8> %y, <4 x i8>* %z @@ -652,6 +696,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <4 x i16>, <4 x i16>* %x %z = sext <4 x i16> %y to <4 x i32> @@ -665,6 +710,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <4 x i16>, <4 x i16>* %x %z = zext <4 x i16> %y to <4 x i32> @@ -681,6 +727,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vsext.vf4 v9, v8 ; LMULMAX1-NEXT: vsext.vf4 v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v4i16_v4i64: @@ -689,6 +736,7 @@ ; LMULMAX4-NEXT: vle16.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; LMULMAX4-NEXT: vsext.vf4 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <4 x i16>, <4 x i16>* %x %z = sext <4 x i16> %y to <4 x i64> @@ -705,6 +753,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vzext.vf4 v9, v8 ; LMULMAX1-NEXT: vzext.vf4 v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v4i16_v4i64: @@ -713,6 +762,7 @@ ; LMULMAX4-NEXT: vle16.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; LMULMAX4-NEXT: vzext.vf4 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <4 x i16>, <4 x i16>* %x %z = zext <4 x i16> %y to <4 x i64> @@ -725,6 +775,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc <8 x i16> %x to <8 x i8> store <8 x i8> %y, <8 x i8>* %z @@ -741,6 +792,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vsext.vf2 v9, v8 ; LMULMAX1-NEXT: vsext.vf2 v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v8i16_v8i32: @@ -749,6 +801,7 @@ ; LMULMAX4-NEXT: vle16.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; LMULMAX4-NEXT: vsext.vf2 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <8 x i16>, <8 x i16>* %x %z = sext <8 x i16> %y to <8 x i32> @@ -765,6 +818,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vzext.vf2 v9, v8 ; LMULMAX1-NEXT: vzext.vf2 v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v8i16_v8i32: @@ -773,6 +827,7 @@ ; LMULMAX4-NEXT: vle16.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; LMULMAX4-NEXT: vzext.vf2 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <8 x i16>, <8 x i16>* %x %z = zext <8 x i16> %y to <8 x i32> @@ -797,6 
+852,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vsext.vf4 v11, v8 ; LMULMAX1-NEXT: vsext.vf4 v8, v12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v8i16_v8i64: @@ -805,6 +861,7 @@ ; LMULMAX4-NEXT: vle16.v v12, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; LMULMAX4-NEXT: vsext.vf4 v8, v12 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <8 x i16>, <8 x i16>* %x %z = sext <8 x i16> %y to <8 x i64> @@ -829,6 +886,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vzext.vf4 v11, v8 ; LMULMAX1-NEXT: vzext.vf4 v8, v12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v8i16_v8i64: @@ -837,6 +895,7 @@ ; LMULMAX4-NEXT: vle16.v v12, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; LMULMAX4-NEXT: vzext.vf4 v8, v12 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <8 x i16>, <8 x i16>* %x %z = zext <8 x i16> %y to <8 x i64> @@ -857,6 +916,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 8 ; LMULMAX1-NEXT: vse8.v v10, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v16i16_v16i8: @@ -864,6 +924,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: vse8.v v10, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <16 x i16> %x to <16 x i8> store <16 x i8> %y, <16 x i8>* %z @@ -887,6 +948,7 @@ ; LMULMAX1-NEXT: vsext.vf2 v11, v8 ; LMULMAX1-NEXT: vsext.vf2 v8, v10 ; LMULMAX1-NEXT: vsext.vf2 v10, v12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v16i16_v16i32: @@ -895,6 +957,7 @@ ; LMULMAX4-NEXT: vle16.v v12, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; LMULMAX4-NEXT: vsext.vf2 v8, v12 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i16>, <16 x i16>* %x %z = sext <16 x i16> %y to <16 x i32> @@ -918,6 +981,7 @@ ; LMULMAX1-NEXT: vzext.vf2 v11, v8 ; LMULMAX1-NEXT: vzext.vf2 v8, v10 ; LMULMAX1-NEXT: vzext.vf2 v10, v12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v16i16_v16i32: @@ -926,6 +990,7 @@ ; LMULMAX4-NEXT: vle16.v v12, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; LMULMAX4-NEXT: vzext.vf2 v8, v12 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i16>, <16 x i16>* %x %z = zext <16 x i16> %y to <16 x i32> @@ -965,6 +1030,7 @@ ; LMULMAX1-NEXT: vsext.vf4 v15, v8 ; LMULMAX1-NEXT: vsext.vf4 v8, v12 ; LMULMAX1-NEXT: vsext.vf4 v12, v16 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v16i16_v16i64: @@ -976,6 +1042,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; LMULMAX4-NEXT: vsext.vf4 v12, v8 ; LMULMAX4-NEXT: vsext.vf4 v8, v16 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i16>, <16 x i16>* %x %z = sext <16 x i16> %y to <16 x i64> @@ -1015,6 +1082,7 @@ ; LMULMAX1-NEXT: vzext.vf4 v15, v8 ; LMULMAX1-NEXT: vzext.vf4 v8, v12 ; LMULMAX1-NEXT: vzext.vf4 v12, v16 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v16i16_v16i64: @@ -1026,6 +1094,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; LMULMAX4-NEXT: vzext.vf4 v12, v8 ; LMULMAX4-NEXT: vzext.vf4 v8, v16 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i16>, <16 x i16>* 
%x %z = zext <16 x i16> %y to <16 x i64> @@ -1040,6 +1109,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc <2 x i32> %x to <2 x i8> store <2 x i8> %y, <2 x i8>* %z @@ -1052,6 +1122,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc <2 x i32> %x to <2 x i16> store <2 x i16> %y, <2 x i16>* %z @@ -1065,6 +1136,7 @@ ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i32>, <2 x i32>* %x %z = sext <2 x i32> %y to <2 x i64> @@ -1078,6 +1150,7 @@ ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load <2 x i32>, <2 x i32>* %x %z = zext <2 x i32> %y to <2 x i64> @@ -1092,6 +1165,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc <4 x i32> %x to <4 x i8> store <4 x i8> %y, <4 x i8>* %z @@ -1104,6 +1178,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc <4 x i32> %x to <4 x i16> store <4 x i16> %y, <4 x i16>* %z @@ -1120,6 +1195,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vsext.vf2 v9, v8 ; LMULMAX1-NEXT: vsext.vf2 v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v4i32_v4i64: @@ -1128,6 +1204,7 @@ ; LMULMAX4-NEXT: vle32.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; LMULMAX4-NEXT: vsext.vf2 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <4 x i32>, <4 x i32>* %x %z = sext <4 x i32> %y to <4 x i64> @@ -1144,6 +1221,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vzext.vf2 v9, v8 ; LMULMAX1-NEXT: vzext.vf2 v8, v10 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v4i32_v4i64: @@ -1152,6 +1230,7 @@ ; LMULMAX4-NEXT: vle32.v v10, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; LMULMAX4-NEXT: vzext.vf2 v8, v10 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <4 x i32>, <4 x i32>* %x %z = zext <4 x i32> %y to <4 x i64> @@ -1176,6 +1255,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 4 ; LMULMAX1-NEXT: vse8.v v10, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v8i32_v8i8: @@ -1185,6 +1265,7 @@ ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <8 x i32> %x to <8 x i8> store <8 x i8> %y, <8 x i8>* %z @@ -1205,6 +1286,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 4 ; LMULMAX1-NEXT: vse16.v v10, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v8i32_v8i16: @@ -1212,6 +1294,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: 
vse16.v v10, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <8 x i32> %x to <8 x i16> store <8 x i16> %y, <8 x i16>* %z @@ -1235,6 +1318,7 @@ ; LMULMAX1-NEXT: vsext.vf2 v11, v8 ; LMULMAX1-NEXT: vsext.vf2 v8, v10 ; LMULMAX1-NEXT: vsext.vf2 v10, v12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v8i32_v8i64: @@ -1243,6 +1327,7 @@ ; LMULMAX4-NEXT: vle32.v v12, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; LMULMAX4-NEXT: vsext.vf2 v8, v12 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <8 x i32>, <8 x i32>* %x %z = sext <8 x i32> %y to <8 x i64> @@ -1266,6 +1351,7 @@ ; LMULMAX1-NEXT: vzext.vf2 v11, v8 ; LMULMAX1-NEXT: vzext.vf2 v8, v10 ; LMULMAX1-NEXT: vzext.vf2 v10, v12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v8i32_v8i64: @@ -1274,6 +1360,7 @@ ; LMULMAX4-NEXT: vle32.v v12, (a0) ; LMULMAX4-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; LMULMAX4-NEXT: vzext.vf2 v8, v12 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <8 x i32>, <8 x i32>* %x %z = zext <8 x i32> %y to <8 x i64> @@ -1310,6 +1397,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v8, 12 ; LMULMAX1-NEXT: vse8.v v12, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v16i32_v16i8: @@ -1319,6 +1407,7 @@ ; LMULMAX4-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <16 x i32> %x to <16 x i8> store <16 x i8> %y, <16 x i8>* %z @@ -1350,6 +1439,7 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v12, (a1) ; LMULMAX1-NEXT: vse16.v v13, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v16i32_v16i16: @@ -1357,6 +1447,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 ; LMULMAX4-NEXT: vse16.v v12, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <16 x i32> %x to <16 x i16> store <16 x i16> %y, <16 x i16>* %z @@ -1394,6 +1485,7 @@ ; LMULMAX1-NEXT: vsext.vf2 v10, v12 ; LMULMAX1-NEXT: vsext.vf2 v12, v14 ; LMULMAX1-NEXT: vsext.vf2 v14, v16 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v16i32_v16i64: @@ -1405,6 +1497,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; LMULMAX4-NEXT: vsext.vf2 v12, v8 ; LMULMAX4-NEXT: vsext.vf2 v8, v16 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i32>, <16 x i32>* %x %z = sext <16 x i32> %y to <16 x i64> @@ -1442,6 +1535,7 @@ ; LMULMAX1-NEXT: vzext.vf2 v10, v12 ; LMULMAX1-NEXT: vzext.vf2 v12, v14 ; LMULMAX1-NEXT: vzext.vf2 v14, v16 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v16i32_v16i64: @@ -1453,6 +1547,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; LMULMAX4-NEXT: vzext.vf2 v12, v8 ; LMULMAX4-NEXT: vzext.vf2 v8, v16 +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = load <16 x i32>, <16 x i32>* %x %z = zext <16 x i32> %y to <16 x i64> @@ -1469,6 +1564,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc <2 x i64> %x to <2 x i8> store <2 x i8> %y, <2 x i8>* %z @@ -1483,6 +1579,7 @@ ; CHECK-NEXT: vsetvli zero, zero, 
e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc <2 x i64> %x to <2 x i16> store <2 x i16> %y, <2 x i16>* %z @@ -1495,6 +1592,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = trunc <2 x i64> %x to <2 x i32> store <2 x i32> %y, <2 x i32>* %z @@ -1523,6 +1621,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 2 ; LMULMAX1-NEXT: vse8.v v10, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v4i64_v4i8: @@ -1534,6 +1633,7 @@ ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <4 x i64> %x to <4 x i8> store <4 x i8> %y, <4 x i8>* %z @@ -1558,6 +1658,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 2 ; LMULMAX1-NEXT: vse16.v v10, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v4i64_v4i16: @@ -1567,6 +1668,7 @@ ; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX4-NEXT: vse16.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <4 x i64> %x to <4 x i16> store <4 x i16> %y, <4 x i16>* %z @@ -1587,6 +1689,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 2 ; LMULMAX1-NEXT: vse32.v v10, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v4i64_v4i32: @@ -1594,6 +1697,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: vse32.v v10, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <4 x i64> %x to <4 x i32> store <4 x i32> %y, <4 x i32>* %z @@ -1638,6 +1742,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v8, 6 ; LMULMAX1-NEXT: vse8.v v12, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v8i64_v8i8: @@ -1649,6 +1754,7 @@ ; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <8 x i64> %x to <8 x i8> store <8 x i8> %y, <8 x i8>* %z @@ -1685,6 +1791,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v8, 6 ; LMULMAX1-NEXT: vse16.v v12, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v8i64_v8i16: @@ -1694,6 +1801,7 @@ ; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0 ; LMULMAX4-NEXT: vse16.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <8 x i64> %x to <8 x i16> store <8 x i16> %y, <8 x i16>* %z @@ -1725,6 +1833,7 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse32.v v12, (a1) ; LMULMAX1-NEXT: vse32.v v13, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v8i64_v8i32: @@ -1732,6 +1841,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 ; LMULMAX4-NEXT: vse32.v v12, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = 
trunc <8 x i64> %x to <8 x i32> store <8 x i32> %y, <8 x i32>* %z @@ -1808,6 +1918,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 14 ; LMULMAX1-NEXT: vse8.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v16i64_v16i8: @@ -1831,6 +1942,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, mu ; LMULMAX4-NEXT: vslideup.vi v9, v8, 8 ; LMULMAX4-NEXT: vse8.v v9, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <16 x i64> %x to <16 x i8> store <16 x i8> %y, <16 x i8>* %z @@ -1894,6 +2006,7 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v16, (a1) ; LMULMAX1-NEXT: vse16.v v17, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v16i64_v16i16: @@ -1913,6 +2026,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, mu ; LMULMAX4-NEXT: vslideup.vi v10, v12, 8 ; LMULMAX4-NEXT: vse16.v v10, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <16 x i64> %x to <16 x i16> store <16 x i16> %y, <16 x i16>* %z @@ -1966,6 +2080,7 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse32.v v9, (a1) ; LMULMAX1-NEXT: vse32.v v17, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v16i64_v16i32: @@ -1980,6 +2095,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, mu ; LMULMAX4-NEXT: vslideup.vi v8, v16, 8 ; LMULMAX4-NEXT: vse32.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret %y = trunc <16 x i64> %x to <16 x i32> store <16 x i32> %y, <16 x i32>* %z @@ -1992,6 +2108,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2006,6 +2123,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2018,6 +2136,7 @@ ; CHECK-NEXT: vl1re16.v v10, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2032,6 +2151,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2044,6 +2164,7 @@ ; CHECK-NEXT: vl2re16.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2058,6 +2179,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2070,6 +2192,7 @@ ; CHECK-NEXT: vl4re16.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2088,6 +2211,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v24, v18 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2100,6 +2224,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: 
vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vse16.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -2112,6 +2237,7 @@ ; CHECK-NEXT: vl1re32.v v10, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2124,6 +2250,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -2136,6 +2263,7 @@ ; CHECK-NEXT: vl2re32.v v12, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2148,6 +2276,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vs2r.v v12, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -2160,6 +2289,7 @@ ; CHECK-NEXT: vl4re32.v v16, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2172,6 +2302,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vs4r.v v16, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -2185,6 +2316,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v24 ; CHECK-NEXT: vfwcvt.f.f.v v16, v28 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = load , * %x %z = fpext %y to @@ -2199,6 +2331,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -2211,6 +2344,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -2225,6 +2359,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v12 ; CHECK-NEXT: vs1r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -2237,6 +2372,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vs2r.v v12, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -2251,6 +2387,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v16 ; CHECK-NEXT: vs2r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -2263,6 +2400,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vs4r.v v16, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -2281,6 +2419,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vs4r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z @@ -2294,6 +2433,7 @@ ; CHECK-NEXT: vfncvt.f.f.w v24, v8 ; CHECK-NEXT: vfncvt.f.f.w v28, v16 ; CHECK-NEXT: vs8r.v v24, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = fptrunc %x to store %y, * %z diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll @@ -9,6 +9,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 0) @@ -25,6 +26,7 @@ ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 2) @@ -39,6 +41,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 0) @@ -55,6 +58,7 @@ ; CHECK-NEXT: vslidedown.vi v8, v8, 6 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 6) @@ -69,6 +73,7 @@ ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX2-NEXT: vse32.v v8, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i32_v8i32_0: @@ -77,6 +82,7 @@ ; LMULMAX1-NEXT: vle32.v v8, (a0) ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vse32.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 0) @@ -93,6 +99,7 @@ ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 2 ; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX2-NEXT: vse32.v v8, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i32_v8i32_2: @@ -103,6 +110,7 @@ ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vse32.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 2) @@ -119,6 +127,7 @@ ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 6 ; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX2-NEXT: vse32.v v8, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i32_v8i32_6: @@ -130,6 +139,7 @@ ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 ; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; LMULMAX1-NEXT: vse32.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 6) @@ -142,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.nxv16i32( %x, i64 0) store <2 x i32> %c, <2 x i32>* %y @@ -155,6 +166,7 @@ ; CHECK-NEXT: vslidedown.vi v8, v8, 6 ; CHECK-NEXT: 
vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.nxv16i32( %x, i64 6) store <2 x i32> %c, <2 x i32>* %y @@ -166,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( %x, i64 0) store <2 x i8> %c, <2 x i8>* %y @@ -179,6 +192,7 @@ ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( %x, i64 2) store <2 x i8> %c, <2 x i8>* %y @@ -192,6 +206,7 @@ ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 8 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v8i32_nxv16i32_8: @@ -203,6 +218,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: vse32.v v16, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %c = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv16i32( %x, i64 8) store <8 x i32> %c, <8 x i32>* %y @@ -217,6 +233,7 @@ ; LMULMAX2-NEXT: vlm.v v8, (a0) ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vsm.v v8, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v8i1_v64i1_0: @@ -225,6 +242,7 @@ ; LMULMAX1-NEXT: vlm.v v8, (a0) ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vsm.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 0) @@ -242,6 +260,7 @@ ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 1 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vsm.v v8, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v8i1_v64i1_8: @@ -252,6 +271,7 @@ ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 1 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vsm.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 8) @@ -270,6 +290,7 @@ ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 2 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vsm.v v8, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v8i1_v64i1_48: @@ -279,6 +300,7 @@ ; LMULMAX1-NEXT: vlm.v v8, (a0) ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vsm.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 48) @@ -291,6 +313,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsm.v v0, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv2i1( %x, i64 0) store <8 x i1> %c, <8 x i1>* %y @@ -302,6 +325,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsm.v v0, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <8 x i1> 
@llvm.experimental.vector.extract.v8i1.nxv64i1( %x, i64 0) store <8 x i1> %c, <8 x i1>* %y @@ -315,6 +339,7 @@ ; CHECK-NEXT: vslidedown.vi v8, v0, 1 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1( %x, i64 8) store <8 x i1> %c, <8 x i1>* %y @@ -328,6 +353,7 @@ ; CHECK-NEXT: vslidedown.vi v8, v0, 6 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1( %x, i64 48) store <8 x i1> %c, <8 x i1>* %y @@ -351,6 +377,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX2-NEXT: vsm.v v8, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i1_v64i1_0: @@ -367,6 +394,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX1-NEXT: vsm.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 0) @@ -395,6 +423,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX2-NEXT: vsm.v v8, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i1_v64i1_2: @@ -416,6 +445,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX1-NEXT: vsm.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 2) @@ -445,6 +475,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX2-NEXT: vsm.v v8, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i1_v64i1_42: @@ -467,6 +498,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX1-NEXT: vsm.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 42) @@ -487,6 +519,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1( %x, i64 0) store <2 x i1> %c, <2 x i1>* %y @@ -512,6 +545,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1( %x, i64 2) store <2 x i1> %c, <2 x i1>* %y @@ -531,6 +565,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1( %x, i64 0) store <2 x i1> %c, <2 x i1>* %y @@ -556,6 +591,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1( %x, i64 2) store <2 x i1> %c, <2 x i1>* 
%y @@ -582,6 +618,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1( %x, i64 42) store <2 x i1> %c, <2 x i1>* %y @@ -607,6 +644,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv32i1( %x, i64 26) store <2 x i1> %c, <2 x i1>* %y @@ -620,6 +658,7 @@ ; CHECK-NEXT: vslidedown.vi v8, v0, 2 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv32i1( %x, i64 16) store <8 x i1> %c, <8 x i1>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <1 x half> %a to i16 ret i16 %b @@ -18,6 +19,7 @@ ; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fmv.x.h a0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <1 x half> %a to half ret half %b @@ -28,6 +30,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <2 x half> %a to i32 ret i32 %b @@ -38,6 +41,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast <1 x float> %a to i32 ret i32 %b @@ -48,6 +52,7 @@ ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV32-FP-NEXT: vmv.x.s a0, v8 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_v2f16_f32: @@ -55,6 +60,7 @@ ; RV64-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV64-FP-NEXT: vfmv.f.s ft0, v8 ; RV64-FP-NEXT: fmv.x.w a0, ft0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast <2 x half> %a to float ret float %b @@ -65,6 +71,7 @@ ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV32-FP-NEXT: vmv.x.s a0, v8 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_v1f32_f32: @@ -72,6 +79,7 @@ ; RV64-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV64-FP-NEXT: vfmv.f.s ft0, v8 ; RV64-FP-NEXT: fmv.x.w a0, ft0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast <1 x float> %a to float ret float %b @@ -85,12 +93,14 @@ ; RV32-FP-NEXT: vsrl.vx v9, v8, a0 ; RV32-FP-NEXT: vmv.x.s a1, v9 ; RV32-FP-NEXT: vmv.x.s a0, v8 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_v4f16_i64: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast <4 x half> %a to i64 ret i64 %b @@ -104,12 +114,14 @@ ; RV32-FP-NEXT: vsrl.vx v9, v8, a0 ; RV32-FP-NEXT: vmv.x.s a1, v9 ; RV32-FP-NEXT: vmv.x.s a0, v8 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: 
bitcast_v2f32_i64: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast <2 x float> %a to i64 ret i64 %b @@ -123,12 +135,14 @@ ; RV32-FP-NEXT: vsrl.vx v9, v8, a0 ; RV32-FP-NEXT: vmv.x.s a1, v9 ; RV32-FP-NEXT: vmv.x.s a0, v8 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_v1f64_i64: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast <1 x double> %a to i64 ret i64 %b @@ -145,12 +159,14 @@ ; RV32-FP-NEXT: lw a0, 8(sp) ; RV32-FP-NEXT: lw a1, 12(sp) ; RV32-FP-NEXT: addi sp, sp, 16 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_v4f16_f64: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast <4 x half> %a to double ret double %b @@ -167,12 +183,14 @@ ; RV32-FP-NEXT: lw a0, 8(sp) ; RV32-FP-NEXT: lw a1, 12(sp) ; RV32-FP-NEXT: addi sp, sp, 16 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_v2f32_f64: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast <2 x float> %a to double ret double %b @@ -189,12 +207,14 @@ ; RV32-FP-NEXT: lw a0, 8(sp) ; RV32-FP-NEXT: lw a1, 12(sp) ; RV32-FP-NEXT: addi sp, sp, 16 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_v1f64_f64: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast <1 x double> %a to double ret double %b @@ -205,6 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast i16 %a to <1 x half> ret <1 x half> %b @@ -215,12 +236,14 @@ ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-FP-NEXT: vmv.s.x v8, a0 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_i32_v2f16: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-FP-NEXT: vmv.v.x v8, a0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast i32 %a to <2 x half> ret <2 x half> %b @@ -231,12 +254,14 @@ ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-FP-NEXT: vmv.s.x v8, a0 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_i32_v1f32: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-FP-NEXT: vmv.v.x v8, a0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast i32 %a to <1 x float> ret <1 x float> %b @@ -251,12 +276,14 @@ ; RV32-FP-NEXT: vslide1up.vx v10, v9, a0 ; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vslideup.vi v8, v10, 0 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_i64_v4f16: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.s.x v8, a0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast i64 %a to <4 x half> ret <4 x half> %b @@ -271,12 +298,14 @@ ; RV32-FP-NEXT: vslide1up.vx v10, v9, a0 ; RV32-FP-NEXT: 
vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vslideup.vi v8, v10, 0 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_i64_v2f32: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.s.x v8, a0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast i64 %a to <2 x float> ret <2 x float> %b @@ -291,12 +320,14 @@ ; RV32-FP-NEXT: vslide1up.vx v10, v9, a0 ; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vslideup.vi v8, v10, 0 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_i64_v1f64: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.s.x v8, a0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast i64 %a to <1 x double> ret <1 x double> %b @@ -308,6 +339,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.s.f v8, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast half %a to <1 x i16> ret <1 x i16> %b @@ -319,6 +351,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.s.f v8, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %b = bitcast half %a to <1 x half> ret <1 x half> %b @@ -329,6 +362,7 @@ ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-FP-NEXT: vmv.s.x v8, a0 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_f32_v2i16: @@ -336,6 +370,7 @@ ; RV64-FP-NEXT: fmv.w.x ft0, a0 ; RV64-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-FP-NEXT: vfmv.s.f v8, ft0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast float %a to <2 x i16> ret <2 x i16> %b @@ -346,6 +381,7 @@ ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-FP-NEXT: vmv.s.x v8, a0 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_f32_v2f16: @@ -353,6 +389,7 @@ ; RV64-FP-NEXT: fmv.w.x ft0, a0 ; RV64-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-FP-NEXT: vfmv.s.f v8, ft0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast float %a to <2 x half> ret <2 x half> %b @@ -363,6 +400,7 @@ ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-FP-NEXT: vmv.s.x v8, a0 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_f32_v1i32: @@ -370,6 +408,7 @@ ; RV64-FP-NEXT: fmv.w.x ft0, a0 ; RV64-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-FP-NEXT: vfmv.s.f v8, ft0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast float %a to <1 x i32> ret <1 x i32> %b @@ -380,6 +419,7 @@ ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-FP-NEXT: vmv.s.x v8, a0 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_f32_v1f32: @@ -387,6 +427,7 @@ ; RV64-FP-NEXT: fmv.w.x ft0, a0 ; RV64-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-FP-NEXT: vfmv.s.f v8, ft0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast float %a to <1 x float> ret <1 x float> %b @@ -403,12 +444,14 @@ ; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vfmv.s.f v8, ft0 ; RV32-FP-NEXT: addi sp, sp, 16 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_f64_v4i16: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.s.x v8, a0 +; RV64-FP-NEXT: 
.cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast double %a to <4 x i16> ret <4 x i16> %b @@ -425,12 +468,14 @@ ; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vfmv.s.f v8, ft0 ; RV32-FP-NEXT: addi sp, sp, 16 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_f64_v4f16: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.s.x v8, a0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast double %a to <4 x half> ret <4 x half> %b @@ -447,12 +492,14 @@ ; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vfmv.s.f v8, ft0 ; RV32-FP-NEXT: addi sp, sp, 16 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_f64_v2i32: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.s.x v8, a0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast double %a to <2 x i32> ret <2 x i32> %b @@ -469,12 +516,14 @@ ; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vfmv.s.f v8, ft0 ; RV32-FP-NEXT: addi sp, sp, 16 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_f64_v2f32: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.s.x v8, a0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast double %a to <2 x float> ret <2 x float> %b @@ -491,12 +540,14 @@ ; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vfmv.s.f v8, ft0 ; RV32-FP-NEXT: addi sp, sp, 16 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_f64_v1i64: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.s.x v8, a0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast double %a to <1 x i64> ret <1 x i64> %b @@ -513,12 +564,14 @@ ; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-FP-NEXT: vfmv.s.f v8, ft0 ; RV32-FP-NEXT: addi sp, sp, 16 +; RV32-FP-NEXT: .cfi_def_cfa_offset 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_f64_v1f64: ; RV64-FP: # %bb.0: ; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.s.x v8, a0 +; RV64-FP-NEXT: .cfi_def_cfa_offset 0 ; RV64-FP-NEXT: ret %b = bitcast double %a to <1 x double> ret <1 x double> %b diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll @@ -15,6 +15,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <4 x float> , <4 x float>* %x ret void @@ -53,6 +54,7 @@ ; LMULMAX1-NEXT: addi a0, sp, 16 ; LMULMAX1-NEXT: vle32.v v8, (a0) ; LMULMAX1-NEXT: addi sp, sp, 32 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: hang_when_merging_stores_after_legalization: @@ -74,6 +76,7 @@ ; LMULMAX2-NEXT: vmv.s.x v0, a0 ; LMULMAX2-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX2-NEXT: vmerge.vvm v8, v8, v12, v0 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret %z = shufflevector <8 x float> %x, <8 x float> %y, <4 x i32> ret <4 x float> %z @@ -90,6 +93,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, ft0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <2 x 
float> , <2 x float>* %x ret void @@ -106,6 +110,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <2 x float> , <2 x float>* %x ret void @@ -124,6 +129,7 @@ ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <4 x float> , <4 x float>* %x ret void @@ -140,6 +146,7 @@ ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v0 = insertelement <4 x float> undef, float %f, i32 0 %v1 = insertelement <4 x float> %v0, float 0.0, i32 1 @@ -161,6 +168,7 @@ ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v0 = insertelement <4 x float> undef, float %f, i32 0 %v1 = insertelement <4 x float> %v0, float 2.0, i32 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vse32.v v9, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x half>, <2 x half>* %x %d = fpext <2 x half> %a to <2 x float> @@ -27,6 +28,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 ; CHECK-NEXT: vse64.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x half>, <2 x half>* %x %d = fpext <2 x half> %a to <2 x double> @@ -41,6 +43,7 @@ ; LMULMAX8-NEXT: vle16.v v8, (a0) ; LMULMAX8-NEXT: vfwcvt.f.f.v v10, v8 ; LMULMAX8-NEXT: vse32.v v10, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fpext_v8f16_v8f32: @@ -55,6 +58,7 @@ ; LMULMAX1-NEXT: addi a0, a1, 16 ; LMULMAX1-NEXT: vse32.v v10, (a0) ; LMULMAX1-NEXT: vse32.v v9, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x half>, <8 x half>* %x %d = fpext <8 x half> %a to <8 x float> @@ -71,6 +75,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; LMULMAX8-NEXT: vfwcvt.f.f.v v12, v10 ; LMULMAX8-NEXT: vse64.v v12, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fpext_v8f16_v8f64: @@ -106,6 +111,7 @@ ; LMULMAX1-NEXT: vse64.v v11, (a0) ; LMULMAX1-NEXT: addi a0, a1, 16 ; LMULMAX1-NEXT: vse64.v v9, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x half>, <8 x half>* %x %d = fpext <8 x half> %a to <8 x double> @@ -121,6 +127,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vse16.v v9, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x float>, <2 x float>* %x %d = fptrunc <2 x float> %a to <2 x half> @@ -138,6 +145,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: vse16.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %d = fptrunc <2 x double> %a to <2 x half> @@ -153,6 +161,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; LMULMAX8-NEXT: vfncvt.f.f.w v10, v8 ; LMULMAX8-NEXT: vse16.v v10, (a1) +; LMULMAX8-NEXT: 
.cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fpround_v8f32_v8f16: @@ -172,6 +181,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v10, 4 ; LMULMAX1-NEXT: vse16.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x float>, <8 x float>* %x %d = fptrunc <8 x float> %a to <8 x half> @@ -189,6 +199,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; LMULMAX8-NEXT: vfncvt.f.f.w v8, v12 ; LMULMAX8-NEXT: vse16.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fpround_v8f64_v8f16: @@ -228,6 +239,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v8, 6 ; LMULMAX1-NEXT: vse16.v v12, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x double>, <8 x double>* %x %d = fptrunc <8 x double> %a to <8 x half> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll @@ -10,6 +10,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmfeq.vv v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -26,6 +27,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmfeq.vv v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -51,6 +53,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -76,6 +79,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -101,6 +105,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -126,6 +131,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -142,6 +148,7 @@ ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vsm.v v12, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = load <16 x half>, <16 x half>* %y @@ -158,6 +165,7 @@ ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vsm.v v12, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = load <16 x half>, <16 x half>* %y @@ -174,6 +182,7 @@ ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vmfle.vv v12, v10, v8 ; CHECK-NEXT: vsm.v v12, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = load <8 x float>, <8 x float>* %y @@ -190,6 +199,7 @@ ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vmfle.vv v12, v10, v8 ; CHECK-NEXT: vsm.v 
v12, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = load <8 x float>, <8 x float>* %y @@ -215,6 +225,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = load <4 x double>, <4 x double>* %y @@ -240,6 +251,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = load <4 x double>, <4 x double>* %y @@ -258,6 +270,7 @@ ; CHECK-NEXT: vmflt.vv v16, v12, v8 ; CHECK-NEXT: vmnand.mm v8, v16, v16 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = load <32 x half>, <32 x half>* %y @@ -275,6 +288,7 @@ ; CHECK-NEXT: vle16.v v12, (a1) ; CHECK-NEXT: vmfle.vv v16, v8, v12 ; CHECK-NEXT: vsm.v v16, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = load <32 x half>, <32 x half>* %y @@ -292,6 +306,7 @@ ; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmnand.mm v8, v16, v16 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = load <16 x float>, <16 x float>* %y @@ -308,6 +323,7 @@ ; CHECK-NEXT: vle32.v v12, (a1) ; CHECK-NEXT: vmfle.vv v16, v12, v8 ; CHECK-NEXT: vsm.v v16, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = load <16 x float>, <16 x float>* %y @@ -325,6 +341,7 @@ ; CHECK-NEXT: vmfle.vv v16, v12, v8 ; CHECK-NEXT: vmnand.mm v8, v16, v16 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = load <8 x double>, <8 x double>* %y @@ -341,6 +358,7 @@ ; CHECK-NEXT: vle64.v v12, (a1) ; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vsm.v v16, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = load <8 x double>, <8 x double>* %y @@ -359,6 +377,7 @@ ; CHECK-NEXT: vmfle.vv v24, v8, v16 ; CHECK-NEXT: vmnand.mm v8, v24, v24 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = load <64 x half>, <64 x half>* %y @@ -376,6 +395,7 @@ ; CHECK-NEXT: vle16.v v16, (a1) ; CHECK-NEXT: vmflt.vv v24, v16, v8 ; CHECK-NEXT: vsm.v v24, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = load <64 x half>, <64 x half>* %y @@ -395,6 +415,7 @@ ; CHECK-NEXT: vmflt.vv v25, v16, v8 ; CHECK-NEXT: vmnor.mm v8, v25, v24 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = load <32 x float>, <32 x float>* %y @@ -412,6 +433,7 @@ ; CHECK-NEXT: vle32.v v16, (a1) ; CHECK-NEXT: vmfeq.vv v24, v8, v16 ; CHECK-NEXT: vsm.v v24, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = load <32 x float>, <32 x float>* %y @@ -430,6 +452,7 @@ ; CHECK-NEXT: vmflt.vv v25, v16, v8 ; CHECK-NEXT: vmor.mm v8, v25, v24 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = load <16 x double>, <16 x double>* %y @@ -446,6 +469,7 @@ ; CHECK-NEXT: vle64.v v16, (a1) ; 
CHECK-NEXT: vmfne.vv v24, v8, v16 ; CHECK-NEXT: vsm.v v24, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = load <16 x double>, <16 x double>* %y @@ -473,6 +497,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x half>, <4 x half>* %x %b = load <4 x half>, <4 x half>* %y @@ -500,6 +525,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x half>, <2 x half>* %x %b = load <2 x half>, <2 x half>* %y @@ -515,6 +541,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v8, v8, fa0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -531,6 +558,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v8, v8, fa0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -556,6 +584,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -581,6 +610,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -606,6 +636,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -631,6 +662,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -647,6 +679,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = insertelement <16 x half> undef, half %y, i32 0 @@ -663,6 +696,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = insertelement <16 x half> undef, half %y, i32 0 @@ -679,6 +713,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = insertelement <8 x float> undef, float %y, i32 0 @@ -695,6 +730,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = insertelement <8 x float> undef, float %y, i32 0 @@ -720,6 +756,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = insertelement <4 x double> undef, double %y, i32 0 @@ -745,6 +782,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = insertelement <4 x double> undef, double %y, i32 0 @@ -763,6 +801,7 @@ ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = insertelement <32 x half> undef, half %y, i32 0 @@ -780,6 +819,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = insertelement <32 x half> undef, half %y, i32 0 @@ -797,6 +837,7 @@ ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = insertelement <16 x float> undef, float %y, i32 0 @@ -813,6 +854,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = insertelement <16 x float> undef, float %y, i32 0 @@ -830,6 +872,7 @@ ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = insertelement <8 x double> undef, double %y, i32 0 @@ -846,6 +889,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = insertelement <8 x double> undef, double %y, i32 0 @@ -864,6 +908,7 @@ ; CHECK-NEXT: vmfle.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v8, v16, v16 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = insertelement <64 x half> undef, half %y, i32 0 @@ -881,6 +926,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = insertelement <64 x half> undef, half %y, i32 0 @@ -900,6 +946,7 @@ ; CHECK-NEXT: vmfgt.vf v17, v8, fa0 ; CHECK-NEXT: vmnor.mm v8, v17, v16 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = insertelement <32 x float> undef, float %y, i32 0 @@ -917,6 +964,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = insertelement <32 x float> undef, float %y, i32 0 @@ -935,6 +983,7 @@ ; CHECK-NEXT: vmfgt.vf v17, v8, fa0 ; CHECK-NEXT: vmor.mm v8, v17, v16 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = insertelement <16 x double> undef, double %y, i32 0 @@ -951,6 +1000,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfne.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = insertelement <16 x double> undef, double %y, i32 0 @@ -979,6 +1029,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x half>, <4 x half>* %x %b = insertelement <4 x half> undef, half %y, i32 0 @@ -1007,6 +1058,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x half>, <2 x half>* %x %b = insertelement <2 x half> undef, half %y, i32 0 @@ -1023,6 +1075,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v8, v8, fa0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1039,6 +1092,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v8, v8, fa0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1064,6 +1118,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -1089,6 +1144,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -1114,6 +1170,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1139,6 +1196,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1155,6 +1213,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = insertelement <16 x half> undef, half %y, i32 0 @@ -1171,6 +1230,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = insertelement <16 x half> undef, half %y, i32 0 @@ -1187,6 +1247,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = insertelement <8 x float> undef, float %y, i32 0 @@ -1203,6 +1264,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = insertelement <8 x float> undef, float %y, i32 0 @@ -1228,6 +1290,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = insertelement <4 x double> undef, double %y, i32 0 @@ -1253,6 +1316,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = insertelement <4 x double> undef, double %y, i32 0 @@ -1271,6 +1335,7 @@ ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = insertelement <32 x half> undef, half %y, i32 0 @@ -1288,6 +1353,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = insertelement <32 x half> undef, half %y, i32 0 @@ -1305,6 +1371,7 @@ ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = insertelement <16 x float> undef, float %y, i32 0 @@ -1321,6 +1388,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = insertelement <16 x float> undef, float %y, i32 0 @@ -1338,6 +1406,7 @@ ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v8, v12, v12 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = insertelement <8 x double> undef, double %y, i32 0 @@ -1354,6 +1423,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = insertelement <8 x double> undef, double %y, i32 0 @@ -1372,6 +1442,7 @@ ; CHECK-NEXT: vmfge.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v8, v16, v16 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = insertelement <64 x half> undef, half %y, i32 0 @@ -1389,6 +1460,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = insertelement <64 x half> undef, half %y, i32 0 @@ -1408,6 +1480,7 @@ ; CHECK-NEXT: vmflt.vf v17, v8, fa0 ; CHECK-NEXT: vmnor.mm v8, v17, v16 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = insertelement <32 x float> undef, float %y, i32 0 @@ -1425,6 +1498,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = insertelement <32 x float> undef, float %y, i32 0 @@ -1443,6 +1517,7 @@ ; CHECK-NEXT: vmflt.vf v17, v8, fa0 ; CHECK-NEXT: vmor.mm v8, v17, v16 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = insertelement <16 x double> undef, double %y, i32 0 @@ -1459,6 +1534,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfne.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %a = load <16 x double>, <16 x double>* %x %b = insertelement <16 x double> undef, double %y, i32 0 @@ -1487,6 +1563,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x half>, <4 x half>* %x %b = insertelement <4 x half> undef, half %y, i32 0 @@ -1515,6 +1592,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x half>, <2 x half>* %x %b = insertelement <2 x half> undef, half %y, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll @@ -10,6 +10,7 @@ ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = shufflevector <4 x half> %x, <4 x half> %y, <4 x i32> ret <4 x half> %s @@ -23,6 +24,7 @@ ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> ret <8 x float> %s @@ -38,6 +40,7 @@ ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: shuffle_fv_v4f64: @@ -49,6 +52,7 @@ ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <4 x double> , <4 x double> %x, <4 x i32> ret <4 x double> %s @@ -64,6 +68,7 @@ ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: shuffle_vf_v4f64: @@ -75,6 +80,7 @@ ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> , <4 x i32> ret <4 x double> %s @@ -90,6 +96,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_permute_shuffle_vu_v4f64: @@ -100,6 +107,7 @@ ; RV64-NEXT: vle64.v v12, (a0) ; RV64-NEXT: vrgather.vv v10, v8, v12 ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> undef, <4 x i32> ret <4 x double> %s @@ -115,6 +123,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_permute_shuffle_uv_v4f64: @@ -125,6 +134,7 @@ ; RV64-NEXT: vle64.v v12, (a0) ; RV64-NEXT: vrgather.vv v10, v8, v12 ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <4 x double> undef, <4 x double> %x, <4 x i32> ret <4 x double> %s @@ -145,6 +155,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vrgather.vi v12, v10, 1, v0.t ; RV32-NEXT: 
vmv2r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_vv_v4f64: @@ -160,6 +171,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vrgather.vi v12, v10, 1, v0.t ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> ret <4 x double> %s @@ -180,6 +192,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vrgatherei16.vv v10, v8, v12, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_xv_v4f64: @@ -195,6 +208,7 @@ ; RV64-NEXT: vrsub.vi v12, v12, 4 ; RV64-NEXT: vrgather.vv v10, v8, v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <4 x double> , <4 x double> %x, <4 x i32> ret <4 x double> %s @@ -215,6 +229,7 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vrgatherei16.vv v10, v8, v12, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_vx_v4f64: @@ -231,6 +246,7 @@ ; RV64-NEXT: vlse64.v v10, (a0), zero ; RV64-NEXT: vrgather.vv v10, v8, v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> , <4 x i32> ret <4 x double> %s diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll @@ -10,6 +10,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <8 x half> undef, half %y, i32 0 %b = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> zeroinitializer @@ -23,6 +24,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <4 x float> undef, float %y, i32 0 %b = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer @@ -36,6 +38,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <2 x double> undef, double %y, i32 0 %b = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> zeroinitializer @@ -49,6 +52,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-NEXT: vfmv.v.f v8, fa0 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_16f16: @@ -58,6 +62,7 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v8, (a1) ; LMULMAX1-NEXT: vse16.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <16 x half> undef, half %y, i32 0 %b = shufflevector <16 x half> %a, <16 x half> undef, <16 x i32> zeroinitializer @@ -71,6 +76,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vfmv.v.f v8, fa0 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_v8f32: @@ -80,6 +86,7 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = 
insertelement <8 x float> undef, float %y, i32 0 %b = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer @@ -93,6 +100,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-NEXT: vfmv.v.f v8, fa0 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_v4f64: @@ -102,6 +110,7 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse64.v v8, (a1) ; LMULMAX1-NEXT: vse64.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <4 x double> undef, double %y, i32 0 %b = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> zeroinitializer @@ -115,6 +124,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <8 x half> undef, half 0.0, i32 0 %b = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> zeroinitializer @@ -128,6 +138,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <4 x float> undef, float 0.0, i32 0 %b = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer @@ -141,6 +152,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <2 x double> undef, double 0.0, i32 0 %b = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> zeroinitializer @@ -154,6 +166,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_16f16: @@ -163,6 +176,7 @@ ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse16.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <16 x half> undef, half 0.0, i32 0 %b = shufflevector <16 x half> %a, <16 x half> undef, <16 x i32> zeroinitializer @@ -176,6 +190,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_v8f32: @@ -185,6 +200,7 @@ ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <8 x float> undef, float 0.0, i32 0 %b = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer @@ -198,6 +214,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_v4f64: @@ -207,6 +224,7 @@ ; LMULMAX1-NEXT: vse64.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse64.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <4 x double> undef, double 0.0, i32 0 %b = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll @@ 
-11,6 +11,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vlse16.v v8, (a1), zero ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = extractelement <8 x half> %a, i32 5 @@ -27,6 +28,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = extractelement <4 x float> %a, i32 2 @@ -42,6 +44,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v8, (a0), zero ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = extractelement <2 x double> %a, i32 0 @@ -59,6 +62,7 @@ ; LMULMAX8-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; LMULMAX8-NEXT: vlse16.v v8, (a1), zero ; LMULMAX8-NEXT: vse16.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: gather_const_v64f16: @@ -81,6 +85,7 @@ ; LMULMAX1-NEXT: vse16.v v8, (a7) ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: vse16.v v8, (a6) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = extractelement <64 x half> %a, i32 47 @@ -98,6 +103,7 @@ ; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; LMULMAX8-NEXT: vlse32.v v8, (a1), zero ; LMULMAX8-NEXT: vse32.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: gather_const_v32f32: @@ -120,6 +126,7 @@ ; LMULMAX1-NEXT: vse32.v v8, (a7) ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: vse32.v v8, (a6) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = extractelement <32 x float> %a, i32 17 @@ -136,6 +143,7 @@ ; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; LMULMAX8-NEXT: vlse64.v v8, (a1), zero ; LMULMAX8-NEXT: vse64.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: gather_const_v16f64: @@ -157,6 +165,7 @@ ; LMULMAX1-NEXT: vse64.v v8, (a7) ; LMULMAX1-NEXT: vse64.v v8, (a0) ; LMULMAX1-NEXT: vse64.v v8, (a6) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = extractelement <16 x double> %a, i32 10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -28,6 +29,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -44,6 +46,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -60,6 +63,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* 
%y @@ -76,6 +80,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -92,6 +97,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -108,6 +114,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -124,6 +131,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -140,6 +148,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -156,6 +165,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -172,6 +182,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -188,6 +199,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -203,6 +215,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = fneg <8 x half> %a @@ -217,6 +230,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = fneg <4 x float> %a @@ -231,6 +245,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = fneg <2 x double> %a @@ -245,6 +260,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = call <8 x half> @llvm.fabs.v8f16(<8 x half> %a) @@ -260,6 +276,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = call <4 x float> @llvm.fabs.v4f32(<4 x float> %a) @@ -275,6 +292,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = call <2 x double> @llvm.fabs.v2f64(<2 x double> %a) @@ -291,6 +309,7 @@ ; CHECK-NEXT: vle16.v v9, 
(a1) ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -308,6 +327,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -325,6 +345,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -341,6 +362,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -357,6 +379,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -373,6 +396,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -390,6 +414,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -407,6 +432,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -424,6 +450,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -442,6 +469,7 @@ ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x half>, <4 x half>* %x %b = load <4 x float>, <4 x float>* %y @@ -464,6 +492,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x float>, <2 x float>* %y @@ -481,6 +510,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = call <8 x half> @llvm.sqrt.v8f16(<8 x half> %a) @@ -496,6 +526,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a) @@ -511,6 +542,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = call <2 x double> @llvm.sqrt.v2f64(<2 x 
double> %a) @@ -528,6 +560,7 @@ ; CHECK-NEXT: vle16.v v10, (a2) ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vse16.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -547,6 +580,7 @@ ; CHECK-NEXT: vle32.v v10, (a2) ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vse32.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -566,6 +600,7 @@ ; CHECK-NEXT: vle64.v v10, (a2) ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vse64.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -585,6 +620,7 @@ ; CHECK-NEXT: vle16.v v10, (a2) ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vse16.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -604,6 +640,7 @@ ; CHECK-NEXT: vle32.v v10, (a2) ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vse32.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -623,6 +660,7 @@ ; CHECK-NEXT: vle64.v v10, (a2) ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vse64.v v10, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -642,6 +680,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vfadd.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: fadd_v16f16: @@ -657,6 +696,7 @@ ; LMULMAX1-RV32-NEXT: vfadd.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fadd_v16f16: @@ -672,6 +712,7 @@ ; LMULMAX1-RV64-NEXT: vfadd.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = load <16 x half>, <16 x half>* %y @@ -688,6 +729,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vfadd.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: fadd_v8f32: @@ -703,6 +745,7 @@ ; LMULMAX1-RV32-NEXT: vfadd.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fadd_v8f32: @@ -718,6 +761,7 @@ ; LMULMAX1-RV64-NEXT: vfadd.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = load <8 x float>, <8 x float>* %y @@ -734,6 +778,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vfadd.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: fadd_v4f64: @@ -749,6 +794,7 @@ ; LMULMAX1-RV32-NEXT: vfadd.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fadd_v4f64: @@ -764,6 +810,7 
@@ ; LMULMAX1-RV64-NEXT: vfadd.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = load <4 x double>, <4 x double>* %y @@ -780,6 +827,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vfsub.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: fsub_v16f16: @@ -795,6 +843,7 @@ ; LMULMAX1-RV32-NEXT: vfsub.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fsub_v16f16: @@ -810,6 +859,7 @@ ; LMULMAX1-RV64-NEXT: vfsub.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = load <16 x half>, <16 x half>* %y @@ -826,6 +876,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vfsub.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: fsub_v8f32: @@ -841,6 +892,7 @@ ; LMULMAX1-RV32-NEXT: vfsub.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fsub_v8f32: @@ -856,6 +908,7 @@ ; LMULMAX1-RV64-NEXT: vfsub.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = load <8 x float>, <8 x float>* %y @@ -872,6 +925,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vfsub.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: fsub_v4f64: @@ -887,6 +941,7 @@ ; LMULMAX1-RV32-NEXT: vfsub.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fsub_v4f64: @@ -902,6 +957,7 @@ ; LMULMAX1-RV64-NEXT: vfsub.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = load <4 x double>, <4 x double>* %y @@ -918,6 +974,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vfmul.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: fmul_v16f16: @@ -933,6 +990,7 @@ ; LMULMAX1-RV32-NEXT: vfmul.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fmul_v16f16: @@ -948,6 +1006,7 @@ ; LMULMAX1-RV64-NEXT: vfmul.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = load <16 x half>, <16 x half>* %y @@ -964,6 +1023,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vfmul.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; 
LMULMAX1-RV32-LABEL: fmul_v8f32: @@ -979,6 +1039,7 @@ ; LMULMAX1-RV32-NEXT: vfmul.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fmul_v8f32: @@ -994,6 +1055,7 @@ ; LMULMAX1-RV64-NEXT: vfmul.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = load <8 x float>, <8 x float>* %y @@ -1010,6 +1072,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vfmul.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: fmul_v4f64: @@ -1025,6 +1088,7 @@ ; LMULMAX1-RV32-NEXT: vfmul.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fmul_v4f64: @@ -1040,6 +1104,7 @@ ; LMULMAX1-RV64-NEXT: vfmul.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = load <4 x double>, <4 x double>* %y @@ -1056,6 +1121,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vfdiv.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: fdiv_v16f16: @@ -1071,6 +1137,7 @@ ; LMULMAX1-RV32-NEXT: vfdiv.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fdiv_v16f16: @@ -1086,6 +1153,7 @@ ; LMULMAX1-RV64-NEXT: vfdiv.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = load <16 x half>, <16 x half>* %y @@ -1102,6 +1170,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vfdiv.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: fdiv_v8f32: @@ -1117,6 +1186,7 @@ ; LMULMAX1-RV32-NEXT: vfdiv.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fdiv_v8f32: @@ -1132,6 +1202,7 @@ ; LMULMAX1-RV64-NEXT: vfdiv.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = load <8 x float>, <8 x float>* %y @@ -1148,6 +1219,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vfdiv.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: fdiv_v4f64: @@ -1163,6 +1235,7 @@ ; LMULMAX1-RV32-NEXT: vfdiv.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: fdiv_v4f64: @@ -1178,6 +1251,7 @@ ; LMULMAX1-RV64-NEXT: vfdiv.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 
0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = load <4 x double>, <4 x double>* %y @@ -1193,6 +1267,7 @@ ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vfsgnjn.vv v8, v8, v8 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: fneg_v16f16: @@ -1205,6 +1280,7 @@ ; LMULMAX1-NEXT: vfsgnjn.vv v9, v9, v9 ; LMULMAX1-NEXT: vse16.v v9, (a0) ; LMULMAX1-NEXT: vse16.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = fneg <16 x half> %a @@ -1219,6 +1295,7 @@ ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vfsgnjn.vv v8, v8, v8 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: fneg_v8f32: @@ -1231,6 +1308,7 @@ ; LMULMAX1-NEXT: vfsgnjn.vv v9, v9, v9 ; LMULMAX1-NEXT: vse32.v v9, (a0) ; LMULMAX1-NEXT: vse32.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = fneg <8 x float> %a @@ -1245,6 +1323,7 @@ ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vfsgnjn.vv v8, v8, v8 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: fneg_v4f64: @@ -1257,6 +1336,7 @@ ; LMULMAX1-NEXT: vfsgnjn.vv v9, v9, v9 ; LMULMAX1-NEXT: vse64.v v9, (a0) ; LMULMAX1-NEXT: vse64.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = fneg <4 x double> %a @@ -1273,6 +1353,7 @@ ; LMULMAX2-NEXT: vle16.v v12, (a2) ; LMULMAX2-NEXT: vfmacc.vv v12, v8, v10 ; LMULMAX2-NEXT: vse16.v v12, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: fma_v16f16: @@ -1291,6 +1372,7 @@ ; LMULMAX1-NEXT: vfmacc.vv v13, v8, v10 ; LMULMAX1-NEXT: vse16.v v13, (a0) ; LMULMAX1-NEXT: vse16.v v12, (a3) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = load <16 x half>, <16 x half>* %y @@ -1310,6 +1392,7 @@ ; LMULMAX2-NEXT: vle32.v v12, (a2) ; LMULMAX2-NEXT: vfmacc.vv v12, v8, v10 ; LMULMAX2-NEXT: vse32.v v12, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: fma_v8f32: @@ -1328,6 +1411,7 @@ ; LMULMAX1-NEXT: vfmacc.vv v13, v8, v10 ; LMULMAX1-NEXT: vse32.v v13, (a0) ; LMULMAX1-NEXT: vse32.v v12, (a3) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = load <8 x float>, <8 x float>* %y @@ -1347,6 +1431,7 @@ ; LMULMAX2-NEXT: vle64.v v12, (a2) ; LMULMAX2-NEXT: vfmacc.vv v12, v8, v10 ; LMULMAX2-NEXT: vse64.v v12, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: fma_v4f64: @@ -1365,6 +1450,7 @@ ; LMULMAX1-NEXT: vfmacc.vv v13, v8, v10 ; LMULMAX1-NEXT: vse64.v v13, (a0) ; LMULMAX1-NEXT: vse64.v v12, (a3) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = load <4 x double>, <4 x double>* %y @@ -1382,6 +1468,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1398,6 +1485,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float 
%y, i32 0 @@ -1414,6 +1502,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1430,6 +1519,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1446,6 +1536,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -1462,6 +1553,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1478,6 +1570,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1494,6 +1587,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -1510,6 +1604,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1526,6 +1621,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1542,6 +1638,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -1558,6 +1655,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1574,6 +1672,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1590,6 +1689,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -1606,6 +1706,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1622,6 +1723,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: 
vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1638,6 +1740,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -1654,6 +1757,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1670,6 +1774,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1686,6 +1791,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -1702,6 +1808,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1718,6 +1825,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1734,6 +1842,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -1750,6 +1859,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1767,6 +1877,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vse16.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -1785,6 +1896,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -1803,6 +1915,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vse64.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -1821,6 +1934,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vse16.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -1839,6 +1953,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x 
float>* %x %b = load <4 x float>, <4 x float>* %y @@ -1857,6 +1972,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vse64.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -1875,6 +1991,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vse16.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -1894,6 +2011,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -1913,6 +2031,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vse64.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -1933,6 +2052,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -1952,6 +2072,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vse64.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x float>, <2 x float>* %x %d = fptosi <2 x float> %a to <2 x i32> @@ -25,6 +26,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x float>, <2 x float>* %x %d = fptoui <2 x float> %a to <2 x i32> @@ -39,6 +41,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = fptosi <2 x float> %x to <2 x i1> ret <2 x i1> %z @@ -51,6 +54,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = fptoui <2 x float> %x to <2 x i1> ret <2 x i1> %z @@ -63,6 +67,7 @@ ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfcvt.rtz.x.f.v v8, v8 ; LMULMAX8-NEXT: vse32.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fp2si_v8f32_v8i32: @@ -76,6 +81,7 @@ ; LMULMAX1-NEXT: vse32.v v9, (a1) ; LMULMAX1-NEXT: addi a0, a1, 16 ; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x float>, <8 x float>* %x %d = fptosi <8 x float> %a to <8 x i32> @@ -90,6 +96,7 @@ ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; LMULMAX8-NEXT: vse32.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fp2ui_v8f32_v8i32: @@ -103,6 +110,7 @@ ; LMULMAX1-NEXT: vse32.v v9, (a1) ; 
LMULMAX1-NEXT: addi a0, a1, 16 ; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x float>, <8 x float>* %x %d = fptoui <8 x float> %a to <8 x i32> @@ -117,6 +125,7 @@ ; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v10, v8 ; LMULMAX8-NEXT: vand.vi v8, v10, 1 ; LMULMAX8-NEXT: vmsne.vi v0, v8, 0 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fp2si_v8f32_v8i1: @@ -147,6 +156,7 @@ ; LMULMAX1-NEXT: vslideup.vi v10, v8, 4 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %z = fptosi <8 x float> %x to <8 x i1> ret <8 x i1> %z @@ -159,6 +169,7 @@ ; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; LMULMAX8-NEXT: vand.vi v8, v10, 1 ; LMULMAX8-NEXT: vmsne.vi v0, v8, 0 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fp2ui_v8f32_v8i1: @@ -189,6 +200,7 @@ ; LMULMAX1-NEXT: vslideup.vi v10, v8, 4 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %z = fptoui <8 x float> %x to <8 x i1> ret <8 x i1> %z @@ -201,6 +213,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vse64.v v9, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x float>, <2 x float>* %x %d = fptosi <2 x float> %a to <2 x i64> @@ -215,6 +228,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vse64.v v9, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x float>, <2 x float>* %x %d = fptoui <2 x float> %a to <2 x i64> @@ -229,6 +243,7 @@ ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfwcvt.rtz.x.f.v v12, v8 ; LMULMAX8-NEXT: vse64.v v12, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fp2si_v8f32_v8i64: @@ -254,6 +269,7 @@ ; LMULMAX1-NEXT: vse64.v v11, (a0) ; LMULMAX1-NEXT: addi a0, a1, 32 ; LMULMAX1-NEXT: vse64.v v10, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x float>, <8 x float>* %x %d = fptosi <8 x float> %a to <8 x i64> @@ -268,6 +284,7 @@ ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfwcvt.rtz.xu.f.v v12, v8 ; LMULMAX8-NEXT: vse64.v v12, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fp2ui_v8f32_v8i64: @@ -293,6 +310,7 @@ ; LMULMAX1-NEXT: vse64.v v11, (a0) ; LMULMAX1-NEXT: addi a0, a1, 32 ; LMULMAX1-NEXT: vse64.v v10, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x float>, <8 x float>* %x %d = fptoui <8 x float> %a to <8 x i64> @@ -309,6 +327,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9 ; CHECK-NEXT: vse64.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x half>, <2 x half>* %x %d = fptosi <2 x half> %a to <2 x i64> @@ -325,6 +344,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9 ; CHECK-NEXT: vse64.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x half>, <2 x half>* %x %d = fptoui <2 x half> %a to <2 x i64> @@ -339,6 +359,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = fptosi <2 x half> %x to <2 x i1> ret <2 x i1> %z @@ -351,6 +372,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: 
vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = fptoui <2 x half> %x to <2 x i1> ret <2 x i1> %z @@ -368,6 +390,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %d = fptosi <2 x double> %a to <2 x i8> @@ -387,6 +410,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %d = fptoui <2 x double> %a to <2 x i8> @@ -401,6 +425,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = fptosi <2 x double> %x to <2 x i1> ret <2 x i1> %z @@ -413,6 +438,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = fptoui <2 x double> %x to <2 x i1> ret <2 x i1> %z @@ -430,6 +456,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX8-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX8-NEXT: vse8.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fp2si_v8f64_v8i8: @@ -477,6 +504,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v8, 6 ; LMULMAX1-NEXT: vse8.v v12, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x double>, <8 x double>* %x %d = fptosi <8 x double> %a to <8 x i8> @@ -496,6 +524,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX8-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX8-NEXT: vse8.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fp2ui_v8f64_v8i8: @@ -543,6 +572,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v8, 6 ; LMULMAX1-NEXT: vse8.v v12, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x double>, <8 x double>* %x %d = fptoui <8 x double> %a to <8 x i8> @@ -557,6 +587,7 @@ ; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v12, v8 ; LMULMAX8-NEXT: vand.vi v8, v12, 1 ; LMULMAX8-NEXT: vmsne.vi v0, v8, 0 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fp2si_v8f64_v8i1: @@ -609,6 +640,7 @@ ; LMULMAX1-NEXT: vslideup.vi v9, v8, 6 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %z = fptosi <8 x double> %x to <8 x i1> ret <8 x i1> %z @@ -621,6 +653,7 @@ ; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; LMULMAX8-NEXT: vand.vi v8, v12, 1 ; LMULMAX8-NEXT: vmsne.vi v0, v8, 0 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fp2ui_v8f64_v8i1: @@ -673,6 +706,7 @@ ; LMULMAX1-NEXT: vslideup.vi v9, v8, 6 ; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %z = fptoui <8 x double> %x to <8 x i1> ret <8 x i1> %z diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: vse32.v v8, 
(a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %d = sitofp <2 x i32> %a to <2 x float> @@ -25,6 +26,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %d = uitofp <2 x i32> %a to <2 x float> @@ -39,6 +41,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = sitofp <2 x i1> %x to <2 x float> ret <2 x float> %z @@ -51,6 +54,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = uitofp <2 x i1> %x to <2 x float> ret <2 x float> %z @@ -63,6 +67,7 @@ ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfcvt.f.x.v v8, v8 ; LMULMAX8-NEXT: vse32.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: si2fp_v8i32_v8f32: @@ -76,6 +81,7 @@ ; LMULMAX1-NEXT: vse32.v v9, (a1) ; LMULMAX1-NEXT: addi a0, a1, 16 ; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %d = sitofp <8 x i32> %a to <8 x float> @@ -90,6 +96,7 @@ ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8 ; LMULMAX8-NEXT: vse32.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: ui2fp_v8i32_v8f32: @@ -103,6 +110,7 @@ ; LMULMAX1-NEXT: vse32.v v9, (a1) ; LMULMAX1-NEXT: addi a0, a1, 16 ; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %d = uitofp <8 x i32> %a to <8 x float> @@ -117,6 +125,7 @@ ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vmerge.vim v8, v8, -1, v0 ; LMULMAX8-NEXT: vfcvt.f.x.v v8, v8 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: si2fp_v8i1_v8f32: @@ -135,6 +144,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; LMULMAX1-NEXT: vmerge.vim v9, v9, -1, v0 ; LMULMAX1-NEXT: vfcvt.f.x.v v9, v9 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %z = sitofp <8 x i1> %x to <8 x float> ret <8 x float> %z @@ -147,6 +157,7 @@ ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: ui2fp_v8i1_v8f32: @@ -165,6 +176,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 ; LMULMAX1-NEXT: vfcvt.f.xu.v v9, v9 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %z = uitofp <8 x i1> %x to <8 x float> ret <8 x float> %z @@ -179,6 +191,7 @@ ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v9 ; CHECK-NEXT: vse64.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i16>, <2 x i16>* %x %d = sitofp <2 x i16> %a to <2 x double> @@ -195,6 +208,7 @@ ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v9 ; CHECK-NEXT: vse64.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i16>, <2 x i16>* %x %d = uitofp <2 x i16> %a to <2 x double> @@ -211,6 +225,7 @@ ; LMULMAX8-NEXT: vsext.vf4 v12, v8 ; LMULMAX8-NEXT: vfcvt.f.x.v v8, v12 ; LMULMAX8-NEXT: vse64.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: si2fp_v8i16_v8f64: @@ -240,6 +255,7 @@ ; 
LMULMAX1-NEXT: vse64.v v11, (a0) ; LMULMAX1-NEXT: addi a0, a1, 16 ; LMULMAX1-NEXT: vse64.v v9, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %d = sitofp <8 x i16> %a to <8 x double> @@ -256,6 +272,7 @@ ; LMULMAX8-NEXT: vzext.vf4 v12, v8 ; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v12 ; LMULMAX8-NEXT: vse64.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: ui2fp_v8i16_v8f64: @@ -285,6 +302,7 @@ ; LMULMAX1-NEXT: vse64.v v11, (a0) ; LMULMAX1-NEXT: addi a0, a1, 16 ; LMULMAX1-NEXT: vse64.v v9, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %d = uitofp <8 x i16> %a to <8 x double> @@ -299,6 +317,7 @@ ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vmerge.vim v8, v8, -1, v0 ; LMULMAX8-NEXT: vfcvt.f.x.v v8, v8 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: si2fp_v8i1_v8f64: @@ -338,6 +357,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; LMULMAX1-NEXT: vmerge.vim v11, v11, -1, v0 ; LMULMAX1-NEXT: vfcvt.f.x.v v11, v11 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %z = sitofp <8 x i1> %x to <8 x double> ret <8 x double> %z @@ -350,6 +370,7 @@ ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: ui2fp_v8i1_v8f64: @@ -389,6 +410,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; LMULMAX1-NEXT: vmerge.vim v11, v11, 1, v0 ; LMULMAX1-NEXT: vfcvt.f.xu.v v11, v11 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %z = uitofp <8 x i1> %x to <8 x double> ret <8 x double> %z @@ -404,6 +426,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: vse16.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %d = sitofp <2 x i64> %a to <2 x half> @@ -421,6 +444,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: vse16.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %d = uitofp <2 x i64> %a to <2 x half> @@ -435,6 +459,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = sitofp <2 x i1> %x to <2 x half> ret <2 x half> %z @@ -447,6 +472,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = uitofp <2 x i1> %x to <2 x half> ret <2 x half> %z @@ -462,6 +488,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; LMULMAX8-NEXT: vfncvt.f.f.w v8, v12 ; LMULMAX8-NEXT: vse16.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: si2fp_v8i64_v8f16: @@ -501,6 +528,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v8, 6 ; LMULMAX1-NEXT: vse16.v v12, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i64>, <8 x i64>* %x %d = sitofp <8 x i64> %a to <8 x half> @@ -518,6 +546,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; LMULMAX8-NEXT: vfncvt.f.f.w v8, v12 ; LMULMAX8-NEXT: vse16.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: ui2fp_v8i64_v8f16: @@ -557,6 +586,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, 
e16, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v8, 6 ; LMULMAX1-NEXT: vse16.v v12, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i64>, <8 x i64>* %x %d = uitofp <8 x i64> %a to <8 x half> @@ -571,6 +601,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = sitofp <8 x i1> %x to <8 x half> ret <8 x half> %z @@ -583,6 +614,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %z = uitofp <8 x i1> %x to <8 x half> ret <8 x half> %z diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vsetivli zero, 2, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %v = call @llvm.experimental.vector.insert.v2i32.nxv8i32( %vec, <2 x i32> %sv, i64 0) @@ -24,6 +25,7 @@ ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %v = call @llvm.experimental.vector.insert.v2i32.nxv8i32( %vec, <2 x i32> %sv, i64 2) @@ -37,6 +39,7 @@ ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %v = call @llvm.experimental.vector.insert.v2i32.nxv8i32( %vec, <2 x i32> %sv, i64 6) @@ -50,6 +53,7 @@ ; LMULMAX2-NEXT: vle32.v v12, (a0) ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m4, tu, mu ; LMULMAX2-NEXT: vslideup.vi v8, v12, 0 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_nxv8i32_v8i32_0: @@ -62,6 +66,7 @@ ; LMULMAX1-NEXT: vslideup.vi v8, v12, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v16, 4 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %sv = load <8 x i32>, <8 x i32>* %svp %v = call @llvm.experimental.vector.insert.v8i32.nxv8i32( %vec, <8 x i32> %sv, i64 0) @@ -75,6 +80,7 @@ ; LMULMAX2-NEXT: vle32.v v12, (a0) ; LMULMAX2-NEXT: vsetivli zero, 16, e32, m4, tu, mu ; LMULMAX2-NEXT: vslideup.vi v8, v12, 8 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_nxv8i32_v8i32_8: @@ -87,6 +93,7 @@ ; LMULMAX1-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX1-NEXT: vsetivli zero, 16, e32, m4, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v16, 12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %sv = load <8 x i32>, <8 x i32>* %svp %v = call @llvm.experimental.vector.insert.v8i32.nxv8i32( %vec, <8 x i32> %sv, i64 8) @@ -98,6 +105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %v = call @llvm.experimental.vector.insert.v2i32.nxv8i32( undef, <2 x i32> %sv, i64 0) @@ -115,6 +123,7 @@ ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %vec = load <4 x i32>, <4 x i32>* %vp @@ -133,6 +142,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %vec = load <4 x i32>, <4 x i32>* %vp @@ -152,6 +162,7 @@ ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %v = call <4 x i32> @llvm.experimental.vector.insert.v2i32.v4i32(<4 x i32> undef, <2 x i32> %sv, i64 0) @@ -170,6 +181,7 @@ ; LMULMAX2-NEXT: vslideup.vi v10, v8, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vse32.v v10, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v8i32_v2i32_0: @@ -182,6 +194,7 @@ ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vse32.v v9, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %vec = load <8 x i32>, <8 x i32>* %vp @@ -201,6 +214,7 @@ ; LMULMAX2-NEXT: vslideup.vi v10, v8, 2 ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vse32.v v10, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v8i32_v2i32_2: @@ -212,6 +226,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 2 ; LMULMAX1-NEXT: vse32.v v9, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %vec = load <8 x i32>, <8 x i32>* %vp @@ -230,6 +245,7 @@ ; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; LMULMAX2-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX2-NEXT: vse32.v v10, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v8i32_v2i32_6: @@ -242,6 +258,7 @@ ; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; LMULMAX1-NEXT: vslideup.vi v9, v8, 2 ; LMULMAX1-NEXT: vse32.v v9, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %vec = load <8 x i32>, <8 x i32>* %vp @@ -258,6 +275,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX2-NEXT: vse32.v v10, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v8i32_undef_v2i32_6: @@ -268,6 +286,7 @@ ; LMULMAX1-NEXT: vslideup.vi v9, v8, 2 ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse32.v v9, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %v = call <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32> undef, <2 x i32> %sv, i64 6) @@ -286,6 +305,7 @@ ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %vp %sv = load <2 x i16>, <2 x i16>* %svp @@ -304,6 +324,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %vp %sv = load <2 x i16>, <2 x i16>* %svp @@ -324,6 +345,7 @@ ; LMULMAX2-NEXT: vslideup.vi v8, v9, 0 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vsm.v v8, (a0) +; 
LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v32i1_v8i1_0: @@ -336,6 +358,7 @@ ; LMULMAX1-NEXT: vslideup.vi v8, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-NEXT: vsm.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %v = load <32 x i1>, <32 x i1>* %vp %sv = load <8 x i1>, <8 x i1>* %svp @@ -356,6 +379,7 @@ ; LMULMAX2-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vsm.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v32i1_v8i1_16: @@ -369,6 +393,7 @@ ; LMULMAX1-NEXT: vslideup.vi v8, v9, 0 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-NEXT: vsm.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %v = load <32 x i1>, <32 x i1>* %vp %sv = load <8 x i1>, <8 x i1>* %svp @@ -396,6 +421,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i1>, <8 x i1>* %vp %sv = load <4 x i1>, <4 x i1>* %svp @@ -423,6 +449,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i1>, <8 x i1>* %vp %sv = load <4 x i1>, <4 x i1>* %svp @@ -438,6 +465,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <2 x i16>, <2 x i16>* %svp %c = call @llvm.experimental.vector.insert.v2i16.nxv2i16( %v, <2 x i16> %sv, i64 0) @@ -451,6 +479,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsetivli zero, 6, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <2 x i16>, <2 x i16>* %svp %c = call @llvm.experimental.vector.insert.v2i16.nxv2i16( %v, <2 x i16> %sv, i64 4) @@ -473,6 +502,7 @@ ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmsne.vi v0, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <4 x i1>, <4 x i1>* %svp %c = call @llvm.experimental.vector.insert.v4i1.nxv2i1( %v, <4 x i1> %sv, i64 0) @@ -486,6 +516,7 @@ ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <8 x i1>, <8 x i1>* %svp %c = call @llvm.experimental.vector.insert.v8i1.nxv8i1( %v, <8 x i1> %sv, i64 0) @@ -499,6 +530,7 @@ ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v0, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <8 x i1>, <8 x i1>* %svp %c = call @llvm.experimental.vector.insert.v8i1.nxv8i1( %v, <8 x i1> %sv, i64 16) @@ -516,6 +548,7 @@ ; CHECK-NEXT: vsetivli zero, 6, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 4 ; CHECK-NEXT: vs8r.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv0 = load <2 x i64>, <2 x i64>* %psv0 %sv1 = load <2 x i64>, <2 x i64>* %psv1 @@ -531,6 +564,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vs8r.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <2 x i64>, <2 x i64>* %psv %v = call @llvm.experimental.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 0) @@ -546,6 +580,7 @@ ; 
CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu ; CHECK-NEXT: vslideup.vi v16, v8, 2 ; CHECK-NEXT: vs8r.v v16, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <2 x i64>, <2 x i64>* %psv %v = call @llvm.experimental.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 2) @@ -581,6 +616,7 @@ ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %sv = load <2 x i64>, <2 x i64>* %psv %v = call @llvm.experimental.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 8) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll @@ -17,6 +17,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; RV32-NEXT: vslideup.vi v8, v10, 3 ; RV32-NEXT: vse64.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_v4i64: @@ -27,6 +28,7 @@ ; RV64-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; RV64-NEXT: vslideup.vi v8, v10, 3 ; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = insertelement <4 x i64> %a, i64 %y, i32 3 @@ -65,11 +67,13 @@ ; RV32-NEXT: sw a2, 20(a0) ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vse64.v v10, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_v3i64: ; RV64: # %bb.0: ; RV64-NEXT: sd a1, 16(a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <3 x i64>, <3 x i64>* %x, align 8 %b = insertelement <3 x i64> %a, i64 %y, i32 2 @@ -87,6 +91,7 @@ ; CHECK-NEXT: vslideup.vi v8, v9, 14 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> %a, i8 %y, i32 14 @@ -106,6 +111,7 @@ ; RV32-NEXT: vslideup.vx v8, v12, a2 ; RV32-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_v32i16: @@ -120,6 +126,7 @@ ; RV64-NEXT: vslideup.vx v8, v12, a1 ; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, mu ; RV64-NEXT: vse16.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <32 x i16>, <32 x i16>* %x %b = insertelement <32 x i16> %a, i16 %y, i32 %idx @@ -138,6 +145,7 @@ ; RV32-NEXT: vslideup.vx v8, v10, a1 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_v8f32: @@ -151,6 +159,7 @@ ; RV64-NEXT: vslideup.vx v8, v10, a1 ; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = insertelement <8 x float> %a, float %y, i32 %idx @@ -167,6 +176,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a1 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i64>, <8 x i64>* %x %b = insertelement <8 x i64> %a, i64 -1, i32 0 @@ -186,6 +196,7 @@ ; RV32-NEXT: vslideup.vx v8, v12, a1 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vse64.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_v8i64: @@ -200,6 +211,7 @@ ; RV64-NEXT: vslideup.vx v8, v12, a1 ; RV64-NEXT: vsetivli zero, 8, e64, m4, 
ta, mu ; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <8 x i64>, <8 x i64>* %x %b = insertelement <8 x i64> %a, i64 -1, i32 %idx @@ -216,6 +228,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a1 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i64>, <8 x i64>* %x %b = insertelement <8 x i64> %a, i64 6, i32 0 @@ -235,6 +248,7 @@ ; RV32-NEXT: vslideup.vx v8, v12, a1 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vse64.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_c6_v8i64: @@ -249,6 +263,7 @@ ; RV64-NEXT: vslideup.vx v8, v12, a1 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <8 x i64>, <8 x i64>* %x %b = insertelement <8 x i64> %a, i64 6, i32 %idx @@ -270,6 +285,7 @@ ; CHECK-NEXT: vle64.v v12, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i64>, <8 x i64>* %x %b = insertelement <8 x i64> %a, i64 6, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll @@ -8,6 +8,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <16 x i8> , <16 x i8>* %x ret void @@ -19,6 +20,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <16 x i8> , <16 x i8>* %x ret void @@ -33,6 +35,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <16 x i8> , <16 x i8>* %x ret void @@ -45,6 +48,7 @@ ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <16 x i8> , <16 x i8>* %x ret void @@ -58,6 +62,7 @@ ; CHECK-NEXT: addi a1, zero, 3 ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <16 x i8> , <16 x i8>* %x ret void @@ -73,6 +78,7 @@ ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: vse8.v v8, (a2) ; CHECK-NEXT: vse8.v v8, (a3) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <4 x i8> , <4 x i8>* %z0 store <4 x i8> , <4 x i8>* %z1 @@ -92,6 +98,7 @@ ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: vse8.v v8, (a2) ; CHECK-NEXT: vse8.v v8, (a3) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <4 x i8> , <4 x i8>* %z0 store <4 x i8> , <4 x i8>* %z1 @@ -113,6 +120,7 @@ ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: vse8.v v8, (a2) ; CHECK-NEXT: vse8.v v8, (a3) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret i8>* %z2, <4 x i8>* %z3) { store <4 x i8> , <4 x i8>* %z0 @@ -133,6 +141,7 @@ ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: vse8.v v8, (a2) ; CHECK-NEXT: vse8.v v8, (a3) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <4 x i8> , <4 x i8>* %z0 store <4 x i8> , <4 x i8>* %z1 @@ -149,6 +158,7 @@ ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vrsub.vi v8, v8, 3 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret store <4 x i8> , <4 x i8>* %z0 ret void @@ -163,6 +173,7 @@ ; CHECK-NEXT: addi a1, zero, -3 ; CHECK-NEXT: vmadd.vx v9, a1, v8 ; CHECK-NEXT: vse8.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <4 x i8> , <4 x i8>* %z0 ret void @@ -180,6 +191,7 @@ ; CHECK-NEXT: vse32.v v9, (a1) ; CHECK-NEXT: vse32.v v9, (a2) ; CHECK-NEXT: vse32.v v9, (a3) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <4 x i32> , <4 x i32>* %z0 store <4 x i32> , <4 x i32>* %z1 @@ -202,6 +214,7 @@ ; RV32-NEXT: addi a0, a0, %lo(.LCPI12_0) ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vle32.v v9, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_vid_step1_add0_v4i64: @@ -209,6 +222,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vadd.vi v9, v8, 2 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ret <4 x i64> } @@ -226,6 +240,7 @@ ; RV32-NEXT: addi a0, a0, %lo(.LCPI13_0) ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vle32.v v9, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_vid_step2_add0_v4i64: @@ -234,6 +249,7 @@ ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vadd.vi v9, v8, 4 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ret <4 x i64> } @@ -267,6 +283,7 @@ ; RV32-NEXT: vmv.v.i v8, -2 ; RV32-NEXT: vse8.v v8, (a4) ; RV32-NEXT: vse8.v v9, (a5) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_no_vid_v4i8: @@ -297,6 +314,7 @@ ; RV64-NEXT: vmv.v.i v8, -2 ; RV64-NEXT: vse8.v v8, (a4) ; RV64-NEXT: vse8.v v9, (a5) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret store <4 x i8> , <4 x i8>* %z0 store <4 x i8> , <4 x i8>* %z1 @@ -317,6 +335,7 @@ ; CHECK-NEXT: vslideup.vi v9, v8, 3 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vse16.v v9, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <8 x i16> , <8 x i16>* %x ret void @@ -328,6 +347,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 8 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <8 x i16> , <8 x i16>* %x ret void @@ -336,6 +356,7 @@ define void @buildvec_dominant0_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: buildvec_dominant0_v2i8: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <2 x i8> , <2 x i8>* %x ret void @@ -347,6 +368,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <2 x i8> , <2 x i8>* %x ret void @@ -359,6 +381,7 @@ ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <2 x i8> , <2 x i8>* %x ret void @@ -372,6 +395,7 @@ ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vle32.v v8, (a1) ; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_dominant0_v2i32: @@ -389,6 +413,7 @@ ; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; RV64-NEXT: vmv.s.x v8, a1 ; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret store <2 x i64> , <2 x i64>* %x ret void @@ -402,6 +427,7 @@ ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vle32.v v8, (a1) ; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_dominant1_optsize_v2i32: @@ -411,6 +437,7 @@ 
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a1) ; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret store <2 x i64> , <2 x i64>* %x ret void @@ -424,6 +451,7 @@ ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <8 x i8> , <8 x i8>* %x ret void @@ -438,6 +466,7 @@ ; RV32-NEXT: vmv.v.x v8, a1 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vse8.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_seq_v8i8_v2i32: @@ -448,6 +477,7 @@ ; RV64-NEXT: vmv.v.x v8, a1 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vse8.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret store <8 x i8> , <8 x i8>* %x ret void @@ -461,6 +491,7 @@ ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV32-NEXT: vle8.v v8, (a1) ; RV32-NEXT: vse8.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_seq_v16i8_v2i64: @@ -475,6 +506,7 @@ ; RV64-NEXT: vmv.v.x v8, a1 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vse8.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret store <16 x i8> , <16 x i8>* %x ret void @@ -489,6 +521,7 @@ ; RV32-NEXT: vmv.v.x v8, a1 ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV32-NEXT: vse8.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_seq2_v16i8_v2i64: @@ -499,6 +532,7 @@ ; RV64-NEXT: vmv.v.x v8, a1 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vse8.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret store <16 x i8> , <16 x i8>* %x ret void @@ -521,6 +555,7 @@ ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmerge.vim v8, v8, 3, v0 ; RV32-NEXT: vse8.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_seq_v9i8: @@ -534,6 +569,7 @@ ; RV64-NEXT: slli a1, a1, 16 ; RV64-NEXT: addi a1, a1, 513 ; RV64-NEXT: sd a1, 0(a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret store <9 x i8> , <9 x i8>* %x ret void @@ -547,6 +583,7 @@ ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <4 x i16> , <4 x i16>* %x ret void @@ -575,6 +612,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 3 ; CHECK-NEXT: vse32.v v9, (a6) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <4 x i32> , <4 x i32>* %z0 store <4 x i32> , <4 x i32>* %z1 @@ -613,6 +651,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 3 ; CHECK-NEXT: vse16.v v9, (a6) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <4 x i16> , <4 x i16>* %z0 store <4 x i16> , <4 x i16>* %z1 @@ -634,6 +673,7 @@ ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vrsub.vi v8, v8, -5 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <8 x i8> , <8 x i8>* %z0 ret void diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vse32.v v9, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = 
load <4 x i8>, <4 x i8>* %x %b = sext <4 x i8> %a to <4 x i32> @@ -29,6 +30,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vse32.v v9, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %b = zext <4 x i8> %a to <4 x i32> @@ -44,6 +46,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; LMULMAX8-NEXT: vsext.vf4 v10, v8 ; LMULMAX8-NEXT: vse32.v v10, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: sext_v8i8_v8i32: @@ -53,6 +56,7 @@ ; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; LMULMAX2-NEXT: vsext.vf4 v10, v8 ; LMULMAX2-NEXT: vse32.v v10, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: sext_v8i8_v8i32: @@ -67,6 +71,7 @@ ; LMULMAX1-NEXT: addi a0, a1, 16 ; LMULMAX1-NEXT: vse32.v v10, (a0) ; LMULMAX1-NEXT: vse32.v v9, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = sext <8 x i8> %a to <8 x i32> @@ -83,6 +88,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; LMULMAX8-NEXT: vsext.vf4 v16, v8 ; LMULMAX8-NEXT: vse32.v v16, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: sext_v32i8_v32i32: @@ -109,6 +115,7 @@ ; LMULMAX2-NEXT: vse32.v v16, (a0) ; LMULMAX2-NEXT: addi a0, a1, 32 ; LMULMAX2-NEXT: vse32.v v12, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: sext_v32i8_v32i32: @@ -156,6 +163,7 @@ ; LMULMAX1-NEXT: vse32.v v13, (a0) ; LMULMAX1-NEXT: addi a0, a1, 80 ; LMULMAX1-NEXT: vse32.v v11, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = sext <32 x i8> %a to <32 x i32> @@ -173,6 +181,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = trunc <4 x i32> %a to <4 x i8> @@ -190,6 +199,7 @@ ; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX8-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX8-NEXT: vse8.v v8, (a1) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: trunc_v8i8_v8i32: @@ -201,6 +211,7 @@ ; LMULMAX2-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; LMULMAX2-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX2-NEXT: vse8.v v8, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: trunc_v8i8_v8i32: @@ -224,6 +235,7 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v10, v8, 4 ; LMULMAX1-NEXT: vse8.v v10, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = trunc <8 x i32> %a to <8 x i8> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -36,6 +37,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -54,6 +56,7 @@ ; CHECK-NEXT: vle8.v v12, 
(a1) ; CHECK-NEXT: vmslt.vv v16, v12, v8 ; CHECK-NEXT: vsm.v v16, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = load <64 x i8>, <64 x i8>* %y @@ -71,6 +74,7 @@ ; CHECK-NEXT: vle8.v v16, (a1) ; CHECK-NEXT: vmslt.vv v24, v8, v16 ; CHECK-NEXT: vsm.v v24, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = load <128 x i8>, <128 x i8>* %y @@ -87,6 +91,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmsle.vv v8, v9, v8 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load <8 x i8>, <8 x i8>* %y @@ -103,6 +108,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmsle.vv v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -120,6 +126,7 @@ ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vmsltu.vv v12, v10, v8 ; CHECK-NEXT: vsm.v v12, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -137,6 +144,7 @@ ; CHECK-NEXT: vle8.v v12, (a1) ; CHECK-NEXT: vmsltu.vv v16, v8, v12 ; CHECK-NEXT: vsm.v v16, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = load <64 x i8>, <64 x i8>* %y @@ -154,6 +162,7 @@ ; CHECK-NEXT: vle8.v v16, (a1) ; CHECK-NEXT: vmsleu.vv v24, v16, v8 ; CHECK-NEXT: vsm.v v24, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = load <128 x i8>, <128 x i8>* %y @@ -170,6 +179,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmsleu.vv v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load <8 x i8>, <8 x i8>* %y @@ -185,6 +195,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vx v8, v8, a1 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -202,6 +213,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsne.vx v10, v8, a1 ; CHECK-NEXT: vsm.v v10, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -219,6 +231,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vx v12, v8, a1 ; CHECK-NEXT: vsm.v v12, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -236,6 +249,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmslt.vx v16, v8, a1 ; CHECK-NEXT: vsm.v v16, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 %y, i32 0 @@ -253,6 +267,7 @@ ; CHECK-NEXT: vmv.v.x v9, a1 ; CHECK-NEXT: vmsle.vv v8, v9, v8 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -269,6 +284,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsle.vx v8, v8, a1 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -286,6 +302,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgtu.vx v10, v8, a1 ; CHECK-NEXT: vsm.v v10, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -303,6 +320,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsltu.vx v12, v8, a1 ; CHECK-NEXT: vsm.v v12, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -321,6 +339,7 @@ ; CHECK-NEXT: vmv.v.x v16, a1 ; CHECK-NEXT: vmsleu.vv v24, v16, v8 ; CHECK-NEXT: vsm.v v24, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 %y, i32 0 @@ -337,6 +356,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsleu.vx v8, v8, a1 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -353,6 +373,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vx v8, v8, a1 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -370,6 +391,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsne.vx v10, v8, a1 ; CHECK-NEXT: vsm.v v10, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -387,6 +409,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmslt.vx v12, v8, a1 ; CHECK-NEXT: vsm.v v12, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -404,6 +427,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vx v16, v8, a1 ; CHECK-NEXT: vsm.v v16, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 %y, i32 0 @@ -420,6 +444,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsle.vx v8, v8, a1 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -437,6 +462,7 @@ ; CHECK-NEXT: vmv.v.x v9, a1 ; CHECK-NEXT: vmsle.vv v8, v9, v8 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -454,6 +480,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsltu.vx v10, v8, a1 ; CHECK-NEXT: vsm.v v10, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -471,6 +498,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgtu.vx v12, v8, a1 ; CHECK-NEXT: vsm.v v12, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -488,6 +516,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsleu.vx v16, v8, a1 ; CHECK-NEXT: vsm.v v16, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 %y, i32 0 @@ -505,6 +534,7 @@ ; CHECK-NEXT: vmv.v.x v9, a1 ; CHECK-NEXT: vmsleu.vv v8, v9, v8 ; CHECK-NEXT: vsm.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -521,6 +551,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vi v8, v8, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 0, i32 0 @@ -538,6 +569,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsne.vi v10, v8, 0 ; CHECK-NEXT: vsm.v v10, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 0, i32 0 @@ -555,6 +587,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vx v12, v8, zero ; CHECK-NEXT: vsm.v v12, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 0, i32 0 @@ -572,6 +605,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsle.vi v16, v8, -1 ; CHECK-NEXT: vsm.v v16, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 0, i32 0 @@ -588,6 +622,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vi v8, v8, -1 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 0, i32 0 @@ -604,6 +639,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsle.vi v8, v8, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 0, i32 0 @@ -622,6 +658,7 @@ ; CHECK-NEXT: addi a0, zero, 5 ; CHECK-NEXT: vmsgtu.vx v10, v8, a0 ; CHECK-NEXT: vsm.v v10, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 5, i32 0 @@ -639,6 +676,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsleu.vi v12, v8, 4 ; CHECK-NEXT: vsm.v v12, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 5, i32 0 @@ -656,6 +694,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgtu.vi v16, v8, 4 ; CHECK-NEXT: vsm.v v16, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 5, i32 0 @@ -672,6 +711,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsleu.vi v8, v8, 5 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 5, i32 0 @@ -691,6 +731,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -710,6 +751,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -729,6 +771,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -748,6 +791,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -767,6 +811,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i32>, <8 x 
i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -786,6 +831,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -10,6 +10,7 @@ ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> %x, <4 x i16> %y, <4 x i32> ret <4 x i16> %s @@ -23,6 +24,7 @@ ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> ret <8 x i32> %s @@ -36,6 +38,7 @@ ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 5, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> , <4 x i16> %x, <4 x i32> ret <4 x i16> %s @@ -49,6 +52,7 @@ ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 5, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> %x, <4 x i16> , <4 x i32> ret <4 x i16> %s @@ -63,6 +67,7 @@ ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> %x, <4 x i16> undef, <4 x i32> ret <4 x i16> %s @@ -77,6 +82,7 @@ ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> undef, <4 x i16> %x, <4 x i32> ret <4 x i16> %s @@ -96,6 +102,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v9, 1, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> %x, <4 x i16> %y, <4 x i32> ret <4 x i16> %s @@ -113,6 +120,7 @@ ; CHECK-NEXT: vmv.v.i v9, 5 ; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> , <4 x i16> %x, <4 x i32> ret <4 x i16> %s @@ -131,6 +139,7 @@ ; CHECK-NEXT: vmv.v.i v9, 5 ; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> %x, <4 x i16> , <4 x i32> ret <4 x i16> %s @@ -146,6 +155,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_permute_shuffle_vu_v8i64: @@ -156,6 +166,7 @@ ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vrgather.vv v12, v8, v16 ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <8 x i64> %x, <8 x i64> undef, <8 x i32> ret <8 x i64> %s @@ -171,6 +182,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; 
RV64-LABEL: vrgather_permute_shuffle_uv_v8i64: @@ -181,6 +193,7 @@ ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vrgather.vv v12, v8, v16 ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <8 x i64> undef, <8 x i64> %x, <8 x i32> ret <8 x i64> %s @@ -207,6 +220,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v16, v12, v20, v0.t ; RV32-NEXT: vmv4r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_vv_v8i64: @@ -228,6 +242,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vrgather.vv v16, v12, v20, v0.t ; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <8 x i64> %x, <8 x i64> %y, <8 x i32> ret <8 x i64> %s @@ -253,6 +268,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v12, v8, v16, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_xv_v8i64: @@ -267,6 +283,7 @@ ; RV64-NEXT: vmv.v.i v12, -1 ; RV64-NEXT: vrgather.vv v12, v8, v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <8 x i64> , <8 x i64> %x, <8 x i32> ret <8 x i64> %s @@ -292,6 +309,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vmv.v.i v16, 5 ; RV32-NEXT: vrgatherei16.vv v8, v16, v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_vx_v8i64: @@ -306,6 +324,7 @@ ; RV64-NEXT: vmv.v.i v12, 5 ; RV64-NEXT: vrgather.vv v12, v8, v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %s = shufflevector <8 x i64> %x, <8 x i64> , <8 x i32> ret <8 x i64> %s @@ -326,6 +345,7 @@ ; CHECK-NEXT: vsrl.vi v10, v8, 1 ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> %z = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> @@ -339,6 +359,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 4 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %shuff = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> ret <8 x i8> %shuff @@ -355,6 +376,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %shuff = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> ret <8 x i8> %shuff @@ -372,6 +394,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %shuff = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> ret <8 x i8> %shuff @@ -387,6 +410,7 @@ ; CHECK-NEXT: vrgather.vi v10, v8, 2 ; CHECK-NEXT: vrgather.vi v10, v9, 0, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %shuff = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> ret <8 x i8> %shuff @@ -408,6 +432,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v9, 0, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %shuff = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> ret <8 x i8> %shuff @@ -426,6 +451,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t ; 
CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %shuff = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> ret <8 x i8> %shuff @@ -446,6 +472,7 @@ ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vrgather.vi v10, v9, 0, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: splat_ve2_we0_ins_i2ve4: @@ -462,6 +489,7 @@ ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vrgather.vi v10, v9, 0, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %shuff = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> ret <8 x i8> %shuff @@ -483,6 +511,7 @@ ; CHECK-NEXT: vrgather.vi v10, v8, 2 ; CHECK-NEXT: vrgather.vv v10, v9, v11, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %shuff = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> ret <8 x i8> %shuff @@ -509,6 +538,7 @@ ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vrgather.vv v10, v9, v11, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: splat_ve2_we0_ins_i2ve4_i5we6: @@ -531,6 +561,7 @@ ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vrgather.vv v10, v9, v11, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %shuff = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> ret <8 x i8> %shuff @@ -549,6 +580,7 @@ ; CHECK-NEXT: vslideup.vi v9, v8, 4 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %shuf = shufflevector <4 x i8> %v, <4 x i8> undef, <8 x i32> ret <8 x i8> %shuf diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <16 x i8> undef, i8 %y, i32 0 %b = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer @@ -25,6 +26,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <8 x i16> undef, i16 %y, i32 0 %b = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> zeroinitializer @@ -38,6 +40,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <4 x i32> undef, i32 %y, i32 0 %b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> zeroinitializer @@ -57,6 +60,7 @@ ; LMULMAX8-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX8-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX8-RV32-NEXT: addi sp, sp, 16 +; LMULMAX8-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-RV32-NEXT: ret ; ; LMULMAX2-RV32-LABEL: splat_v2i64: @@ -70,6 +74,7 @@ ; LMULMAX2-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: addi sp, sp, 16 +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_v2i64: @@ -83,6 +88,7 @@ ; LMULMAX1-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi sp, sp, 16 +; LMULMAX1-RV32-NEXT: 
.cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX8-RV64-LABEL: splat_v2i64: @@ -90,6 +96,7 @@ ; LMULMAX8-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX8-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX8-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX8-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-RV64-NEXT: ret ; ; LMULMAX2-RV64-LABEL: splat_v2i64: @@ -97,6 +104,7 @@ ; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_v2i64: @@ -104,6 +112,7 @@ ; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = insertelement <2 x i64> undef, i64 %y, i32 0 %b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer @@ -118,6 +127,7 @@ ; LMULMAX8-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.x v8, a1 ; LMULMAX8-NEXT: vse8.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_v32i8: @@ -126,6 +136,7 @@ ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_v32i8: @@ -135,6 +146,7 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse8.v v8, (a1) ; LMULMAX1-NEXT: vse8.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <32 x i8> undef, i8 %y, i32 0 %b = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer @@ -148,6 +160,7 @@ ; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.x v8, a1 ; LMULMAX8-NEXT: vse16.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_v16i16: @@ -155,6 +168,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_v16i16: @@ -164,6 +178,7 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v8, (a1) ; LMULMAX1-NEXT: vse16.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <16 x i16> undef, i16 %y, i32 0 %b = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer @@ -177,6 +192,7 @@ ; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.x v8, a1 ; LMULMAX8-NEXT: vse32.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_v8i32: @@ -184,6 +200,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_v8i32: @@ -193,6 +210,7 @@ ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <8 x i32> undef, i32 %y, i32 0 %b = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer @@ -212,6 +230,7 @@ ; LMULMAX8-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX8-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX8-RV32-NEXT: addi sp, sp, 16 +; LMULMAX8-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-RV32-NEXT: ret ; ; LMULMAX2-RV32-LABEL: splat_v4i64: @@ -225,6 +244,7 @@ ; 
LMULMAX2-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: addi sp, sp, 16 +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_v4i64: @@ -238,6 +258,7 @@ ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a1) ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX8-RV64-LABEL: splat_v4i64: @@ -245,6 +266,7 @@ ; LMULMAX8-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX8-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX8-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX8-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-RV64-NEXT: ret ; ; LMULMAX2-RV64-LABEL: splat_v4i64: @@ -252,6 +274,7 @@ ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_v4i64: @@ -261,6 +284,7 @@ ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a1) ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = insertelement <4 x i64> undef, i64 %y, i32 0 %b = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> zeroinitializer @@ -274,6 +298,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <16 x i8> undef, i8 0, i32 0 %b = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer @@ -287,6 +312,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <8 x i16> undef, i16 0, i32 0 %b = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> zeroinitializer @@ -300,6 +326,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <4 x i32> undef, i32 0, i32 0 %b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> zeroinitializer @@ -313,6 +340,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <2 x i64> undef, i64 0, i32 0 %b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer @@ -327,6 +355,7 @@ ; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse8.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_zero_v32i8: @@ -335,6 +364,7 @@ ; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_v32i8: @@ -344,6 +374,7 @@ ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse8.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <32 x i8> undef, i8 0, i32 0 %b = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer @@ -357,6 +388,7 @@ ; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse16.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: 
splat_zero_v16i16: @@ -364,6 +396,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_v16i16: @@ -373,6 +406,7 @@ ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse16.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <16 x i16> undef, i16 0, i32 0 %b = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer @@ -386,6 +420,7 @@ ; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse32.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_zero_v8i32: @@ -393,6 +428,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_v8i32: @@ -402,6 +438,7 @@ ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <8 x i32> undef, i32 0, i32 0 %b = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer @@ -415,6 +452,7 @@ ; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse64.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_zero_v4i64: @@ -422,6 +460,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_zero_v4i64: @@ -431,6 +470,7 @@ ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a0, a0, 16 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_zero_v4i64: @@ -440,6 +480,7 @@ ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a0, a0, 16 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = insertelement <4 x i64> undef, i64 0, i32 0 %b = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> zeroinitializer @@ -453,6 +494,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <16 x i8> undef, i8 -1, i32 0 %b = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer @@ -466,6 +508,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <8 x i16> undef, i16 -1, i32 0 %b = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> zeroinitializer @@ -479,6 +522,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <4 x i32> undef, i32 -1, i32 0 %b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> zeroinitializer @@ -492,6 +536,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <2 x i64> undef, i64 -1, 
i32 0 %b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer @@ -506,6 +551,7 @@ ; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, -1 ; LMULMAX8-NEXT: vse8.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_allones_v32i8: @@ -514,6 +560,7 @@ ; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, -1 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_allones_v32i8: @@ -523,6 +570,7 @@ ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse8.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <32 x i8> undef, i8 -1, i32 0 %b = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer @@ -536,6 +584,7 @@ ; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, -1 ; LMULMAX8-NEXT: vse16.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_allones_v16i16: @@ -543,6 +592,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, -1 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_allones_v16i16: @@ -552,6 +602,7 @@ ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse16.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <16 x i16> undef, i16 -1, i32 0 %b = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer @@ -565,6 +616,7 @@ ; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, -1 ; LMULMAX8-NEXT: vse32.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_allones_v8i32: @@ -572,6 +624,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, -1 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_allones_v8i32: @@ -581,6 +634,7 @@ ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = insertelement <8 x i32> undef, i32 -1, i32 0 %b = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer @@ -594,6 +648,7 @@ ; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX8-NEXT: vmv.v.i v8, -1 ; LMULMAX8-NEXT: vse64.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_allones_v4i64: @@ -601,6 +656,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.i v8, -1 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_allones_v4i64: @@ -610,6 +666,7 @@ ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a0, a0, 16 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_allones_v4i64: @@ -619,6 +676,7 @@ ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a0, a0, 16 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = insertelement <4 x i64> undef, i64 -1, i32 0 %b = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> zeroinitializer @@ -637,6 +695,7 @@ ; LMULMAX8-NEXT: 
vle64.v v8, (a0) ; LMULMAX8-NEXT: vadd.vi v8, v8, -1 ; LMULMAX8-NEXT: vse64.v v8, (a0) +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_allones_with_use_v4i64: @@ -645,6 +704,7 @@ ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vadd.vi v8, v8, -1 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_allones_with_use_v4i64: @@ -660,6 +720,7 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_allones_with_use_v4i64: @@ -672,6 +733,7 @@ ; LMULMAX1-RV64-NEXT: vadd.vi v9, v9, -1 ; LMULMAX1-RV64-NEXT: vse64.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = add <4 x i64> %a, @@ -697,6 +759,7 @@ ; LMULMAX8-RV32-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-RV32-NEXT: vse64.v v8, (a3) ; LMULMAX8-RV32-NEXT: addi sp, sp, 16 +; LMULMAX8-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-RV32-NEXT: ret ; ; LMULMAX2-RV32-LABEL: vadd_vx_v16i64: @@ -727,6 +790,7 @@ ; LMULMAX2-RV32-NEXT: vse64.v v12, (a3) ; LMULMAX2-RV32-NEXT: addi a0, a3, 32 ; LMULMAX2-RV32-NEXT: vse64.v v14, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX1-RV32-LABEL: vadd_vx_v16i64: @@ -777,6 +841,7 @@ ; LMULMAX1-RV32-NEXT: vse64.v v14, (a3) ; LMULMAX1-RV32-NEXT: addi a0, a3, 16 ; LMULMAX1-RV32-NEXT: vse64.v v15, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX8-RV64-LABEL: vadd_vx_v16i64: @@ -785,6 +850,7 @@ ; LMULMAX8-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV64-NEXT: vadd.vx v8, v8, a1 ; LMULMAX8-RV64-NEXT: vse64.v v8, (a2) +; LMULMAX8-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-RV64-NEXT: ret ; ; LMULMAX2-RV64-LABEL: vadd_vx_v16i64: @@ -808,6 +874,7 @@ ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: addi a0, a2, 32 ; LMULMAX2-RV64-NEXT: vse64.v v10, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV64-LABEL: vadd_vx_v16i64: @@ -851,6 +918,7 @@ ; LMULMAX1-RV64-NEXT: vse64.v v12, (a0) ; LMULMAX1-RV64-NEXT: addi a0, a2, 16 ; LMULMAX1-RV64-NEXT: vse64.v v13, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %va = load <16 x i64>, <16 x i64>* %a %head = insertelement <16 x i64> undef, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vlse8.v v8, (a1), zero ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = extractelement <16 x i8> %a, i32 12 @@ -27,6 +28,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vlse16.v v8, (a1), zero ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = extractelement <8 x i16> %a, i32 5 @@ -43,6 +45,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = 
extractelement <4 x i32> %a, i32 3 @@ -59,6 +62,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vlse64.v v8, (a1), zero ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = extractelement <2 x i64> %a, i32 1 @@ -76,6 +80,7 @@ ; LMULMAX4-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; LMULMAX4-NEXT: vlse8.v v8, (a1), zero ; LMULMAX4-NEXT: vse8.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX1-LABEL: gather_const_v64i8: @@ -89,6 +94,7 @@ ; LMULMAX1-NEXT: vse8.v v8, (a3) ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: vse8.v v8, (a2) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = extractelement <64 x i8> %a, i32 32 @@ -106,6 +112,7 @@ ; LMULMAX4-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; LMULMAX4-NEXT: vlse16.v v8, (a1), zero ; LMULMAX4-NEXT: vse16.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX1-LABEL: gather_const_v16i16: @@ -120,6 +127,7 @@ ; LMULMAX1-NEXT: vse16.v v8, (a2) ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: vse16.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <32 x i16>, <32 x i16>* %x %b = extractelement <32 x i16> %a, i32 25 @@ -136,6 +144,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; LMULMAX4-NEXT: vlse32.v v8, (a1), zero ; LMULMAX4-NEXT: vse32.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX1-LABEL: gather_const_v16i32: @@ -150,6 +159,7 @@ ; LMULMAX1-NEXT: vse32.v v8, (a2) ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: vse32.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <16 x i32>, <16 x i32>* %x %b = extractelement <16 x i32> %a, i32 9 @@ -166,6 +176,7 @@ ; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; LMULMAX4-NEXT: vlse64.v v8, (a1), zero ; LMULMAX4-NEXT: vse64.v v8, (a0) +; LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX4-NEXT: ret ; ; LMULMAX1-LABEL: gather_const_v8i64: @@ -180,6 +191,7 @@ ; LMULMAX1-NEXT: vse64.v v8, (a2) ; LMULMAX1-NEXT: vse64.v v8, (a0) ; LMULMAX1-NEXT: vse64.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <8 x i64>, <8 x i64>* %x %b = extractelement <8 x i64> %a, i32 3 @@ -196,6 +208,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), zero ; CHECK-NEXT: vse16.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = load <4 x i16>, <4 x i16>* %y @@ -212,6 +225,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vlse16.v v8, (a0), zero ; CHECK-NEXT: vse16.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = load <4 x i16>, <4 x i16>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -28,6 +29,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -44,6 
+46,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -60,6 +63,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -76,6 +80,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -92,6 +97,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -108,6 +114,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -124,6 +131,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -140,6 +148,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -156,6 +165,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -172,6 +182,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -188,6 +199,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -204,6 +216,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -220,6 +233,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -236,6 +250,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -252,6 +267,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -268,6 +284,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x 
i8>, <16 x i8>* %y @@ -284,6 +301,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -300,6 +318,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -316,6 +335,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -332,6 +352,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -348,6 +369,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -364,6 +386,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -380,6 +403,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -396,6 +420,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -412,6 +437,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -428,6 +454,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -444,6 +471,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -460,6 +488,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -476,6 +505,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -492,6 +522,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -508,6 +539,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x 
i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -524,6 +556,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -540,6 +573,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -556,6 +590,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -572,6 +607,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -588,6 +624,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -604,6 +641,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -620,6 +658,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -636,6 +675,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -652,6 +692,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -668,6 +709,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -684,6 +726,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -700,6 +743,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -716,6 +760,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -732,6 +777,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -748,6 +794,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -764,6 +811,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -780,6 +828,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -796,6 +845,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -812,6 +862,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -828,6 +879,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -882,6 +934,7 @@ ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: vsrl.vv v8, v8, v9 ; RV32-NEXT: vse8.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhu_v16i8: @@ -929,6 +982,7 @@ ; RV64-NEXT: vadd.vv v8, v8, v10 ; RV64-NEXT: vsrl.vv v8, v8, v9 ; RV64-NEXT: vse8.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = udiv <16 x i8> %a, @@ -970,6 +1024,7 @@ ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = udiv <8 x i16> %a, @@ -1003,6 +1058,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = udiv <4 x i32> %a, @@ -1028,6 +1084,7 @@ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vsrl.vv v8, v8, v9 ; RV32-NEXT: vse64.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhu_v2i64: @@ -1059,6 +1116,7 @@ ; RV64-NEXT: vadd.vi v9, v9, 1 ; RV64-NEXT: vsrl.vv v8, v8, v9 ; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = udiv <2 x i64> %a, @@ -1085,6 +1143,7 @@ ; RV32-NEXT: vmulhu.vv v8, v8, v10 ; RV32-NEXT: vsrl.vv v8, v8, v9 ; RV32-NEXT: vse8.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhs_v16i8: @@ -1105,6 +1164,7 @@ ; RV64-NEXT: vmulhu.vv v8, v8, v10 ; RV64-NEXT: vsrl.vv v8, v8, v9 ; RV64-NEXT: vse8.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = udiv <16 x i8> %a, @@ -1132,6 +1192,7 @@ ; RV32-NEXT: vsrl.vi v9, v8, 15 ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhs_v8i16: @@ -1153,6 +1214,7 @@ ; RV64-NEXT: vsrl.vi v9, v8, 15 ; RV64-NEXT: vadd.vv v8, v8, v9 ; RV64-NEXT: vse16.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = sdiv <8 x i16> %a, @@ -1180,6 +1242,7 @@ ; RV32-NEXT: 
vsra.vi v8, v8, 1 ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhs_v4i32: @@ -1202,6 +1265,7 @@ ; RV64-NEXT: vsrl.vi v9, v8, 31 ; RV64-NEXT: vadd.vv v8, v8, v9 ; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = sdiv <4 x i32> %a, @@ -1241,6 +1305,7 @@ ; RV32-NEXT: vsrl.vx v9, v10, a1 ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: vse64.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhs_v2i64: @@ -1269,6 +1334,7 @@ ; RV64-NEXT: vsra.vv v9, v11, v10 ; RV64-NEXT: vadd.vv v8, v9, v8 ; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = sdiv <2 x i64> %a, @@ -1284,6 +1350,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -1301,6 +1368,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -1318,6 +1386,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -1335,6 +1404,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -1352,6 +1422,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -1369,6 +1440,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -1386,6 +1458,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -1403,6 +1476,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -1420,6 +1494,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -1437,6 +1512,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -1454,6 +1530,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -1471,6 +1548,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vminu.vv 
v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -1488,6 +1566,7 @@ ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -1505,6 +1584,7 @@ ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -1522,6 +1602,7 @@ ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -1539,6 +1620,7 @@ ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = load <2 x i64>, <2 x i64>* %y @@ -1557,6 +1639,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: add_v32i8: @@ -1572,6 +1655,7 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: add_v32i8: @@ -1587,6 +1671,7 @@ ; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -1603,6 +1688,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: add_v16i16: @@ -1618,6 +1704,7 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: add_v16i16: @@ -1633,6 +1720,7 @@ ; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -1649,6 +1737,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: add_v8i32: @@ -1664,6 +1753,7 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: add_v8i32: @@ -1679,6 +1769,7 @@ ; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -1695,6 +1786,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: 
.cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: add_v4i64: @@ -1710,6 +1802,7 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: add_v4i64: @@ -1725,6 +1818,7 @@ ; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -1742,6 +1836,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: sub_v32i8: @@ -1757,6 +1852,7 @@ ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: sub_v32i8: @@ -1772,6 +1868,7 @@ ; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -1788,6 +1885,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: sub_v16i16: @@ -1803,6 +1901,7 @@ ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: sub_v16i16: @@ -1818,6 +1917,7 @@ ; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -1834,6 +1934,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: sub_v8i32: @@ -1849,6 +1950,7 @@ ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: sub_v8i32: @@ -1864,6 +1966,7 @@ ; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -1880,6 +1983,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: sub_v4i64: @@ -1895,6 +1999,7 @@ ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: sub_v4i64: @@ -1910,6 +2015,7 @@ ; LMULMAX1-RV64-NEXT: vsub.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; 
LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -1927,6 +2033,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vmul.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: mul_v32i8: @@ -1942,6 +2049,7 @@ ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: mul_v32i8: @@ -1957,6 +2065,7 @@ ; LMULMAX1-RV64-NEXT: vmul.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -1973,6 +2082,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vmul.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: mul_v16i16: @@ -1988,6 +2098,7 @@ ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: mul_v16i16: @@ -2003,6 +2114,7 @@ ; LMULMAX1-RV64-NEXT: vmul.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -2019,6 +2131,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vmul.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: mul_v8i32: @@ -2034,6 +2147,7 @@ ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: mul_v8i32: @@ -2049,6 +2163,7 @@ ; LMULMAX1-RV64-NEXT: vmul.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -2065,6 +2180,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vmul.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: mul_v4i64: @@ -2080,6 +2196,7 @@ ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: mul_v4i64: @@ -2095,6 +2212,7 @@ ; LMULMAX1-RV64-NEXT: vmul.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -2112,6 +2230,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: and_v32i8: @@ -2127,6 +2246,7 @@ ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: 
ret ; ; LMULMAX1-RV64-LABEL: and_v32i8: @@ -2142,6 +2262,7 @@ ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -2158,6 +2279,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: and_v16i16: @@ -2173,6 +2295,7 @@ ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: and_v16i16: @@ -2188,6 +2311,7 @@ ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -2204,6 +2328,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: and_v8i32: @@ -2219,6 +2344,7 @@ ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: and_v8i32: @@ -2234,6 +2360,7 @@ ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -2250,6 +2377,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: and_v4i64: @@ -2265,6 +2393,7 @@ ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: and_v4i64: @@ -2280,6 +2409,7 @@ ; LMULMAX1-RV64-NEXT: vand.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -2297,6 +2427,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: or_v32i8: @@ -2312,6 +2443,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: or_v32i8: @@ -2327,6 +2459,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -2343,6 +2476,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; 
LMULMAX1-RV32-LABEL: or_v16i16: @@ -2358,6 +2492,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: or_v16i16: @@ -2373,6 +2508,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -2389,6 +2525,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: or_v8i32: @@ -2404,6 +2541,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: or_v8i32: @@ -2419,6 +2557,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -2435,6 +2574,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vor.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: or_v4i64: @@ -2450,6 +2590,7 @@ ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: or_v4i64: @@ -2465,6 +2606,7 @@ ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -2482,6 +2624,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vxor.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: xor_v32i8: @@ -2497,6 +2640,7 @@ ; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: xor_v32i8: @@ -2512,6 +2656,7 @@ ; LMULMAX1-RV64-NEXT: vxor.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -2528,6 +2673,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vxor.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: xor_v16i16: @@ -2543,6 +2689,7 @@ ; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: xor_v16i16: @@ -2558,6 +2705,7 @@ ; LMULMAX1-RV64-NEXT: vxor.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = 
load <16 x i16>, <16 x i16>* %y @@ -2574,6 +2722,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vxor.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: xor_v8i32: @@ -2589,6 +2738,7 @@ ; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: xor_v8i32: @@ -2604,6 +2754,7 @@ ; LMULMAX1-RV64-NEXT: vxor.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -2620,6 +2771,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vxor.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: xor_v4i64: @@ -2635,6 +2787,7 @@ ; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: xor_v4i64: @@ -2650,6 +2803,7 @@ ; LMULMAX1-RV64-NEXT: vxor.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -2667,6 +2821,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: lshr_v32i8: @@ -2682,6 +2837,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: lshr_v32i8: @@ -2697,6 +2853,7 @@ ; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -2713,6 +2870,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: lshr_v16i16: @@ -2728,6 +2886,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: lshr_v16i16: @@ -2743,6 +2902,7 @@ ; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -2759,6 +2919,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: lshr_v8i32: @@ -2774,6 +2935,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: lshr_v8i32: @@ -2789,6 
+2951,7 @@ ; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -2805,6 +2968,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: lshr_v4i64: @@ -2820,6 +2984,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: lshr_v4i64: @@ -2835,6 +3000,7 @@ ; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -2852,6 +3018,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vsra.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: ashr_v32i8: @@ -2867,6 +3034,7 @@ ; LMULMAX1-RV32-NEXT: vsra.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: ashr_v32i8: @@ -2882,6 +3050,7 @@ ; LMULMAX1-RV64-NEXT: vsra.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -2898,6 +3067,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vsra.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: ashr_v16i16: @@ -2913,6 +3083,7 @@ ; LMULMAX1-RV32-NEXT: vsra.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: ashr_v16i16: @@ -2928,6 +3099,7 @@ ; LMULMAX1-RV64-NEXT: vsra.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -2944,6 +3116,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vsra.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: ashr_v8i32: @@ -2959,6 +3132,7 @@ ; LMULMAX1-RV32-NEXT: vsra.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: ashr_v8i32: @@ -2974,6 +3148,7 @@ ; LMULMAX1-RV64-NEXT: vsra.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -2990,6 +3165,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vsra.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: ashr_v4i64: @@ -3005,6 +3181,7 
@@ ; LMULMAX1-RV32-NEXT: vsra.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: ashr_v4i64: @@ -3020,6 +3197,7 @@ ; LMULMAX1-RV64-NEXT: vsra.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -3037,6 +3215,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vsll.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: shl_v32i8: @@ -3052,6 +3231,7 @@ ; LMULMAX1-RV32-NEXT: vsll.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: shl_v32i8: @@ -3067,6 +3247,7 @@ ; LMULMAX1-RV64-NEXT: vsll.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -3083,6 +3264,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vsll.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: shl_v16i16: @@ -3098,6 +3280,7 @@ ; LMULMAX1-RV32-NEXT: vsll.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: shl_v16i16: @@ -3113,6 +3296,7 @@ ; LMULMAX1-RV64-NEXT: vsll.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -3129,6 +3313,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vsll.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: shl_v8i32: @@ -3144,6 +3329,7 @@ ; LMULMAX1-RV32-NEXT: vsll.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: shl_v8i32: @@ -3159,6 +3345,7 @@ ; LMULMAX1-RV64-NEXT: vsll.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -3175,6 +3362,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vsll.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: shl_v4i64: @@ -3190,6 +3378,7 @@ ; LMULMAX1-RV32-NEXT: vsll.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: shl_v4i64: @@ -3205,6 +3394,7 @@ ; LMULMAX1-RV64-NEXT: vsll.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -3222,6 
+3412,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vdiv.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: sdiv_v32i8: @@ -3237,6 +3428,7 @@ ; LMULMAX1-RV32-NEXT: vdiv.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: sdiv_v32i8: @@ -3252,6 +3444,7 @@ ; LMULMAX1-RV64-NEXT: vdiv.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -3268,6 +3461,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vdiv.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: sdiv_v16i16: @@ -3283,6 +3477,7 @@ ; LMULMAX1-RV32-NEXT: vdiv.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: sdiv_v16i16: @@ -3298,6 +3493,7 @@ ; LMULMAX1-RV64-NEXT: vdiv.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -3314,6 +3510,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vdiv.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: sdiv_v8i32: @@ -3329,6 +3526,7 @@ ; LMULMAX1-RV32-NEXT: vdiv.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: sdiv_v8i32: @@ -3344,6 +3542,7 @@ ; LMULMAX1-RV64-NEXT: vdiv.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -3360,6 +3559,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vdiv.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: sdiv_v4i64: @@ -3375,6 +3575,7 @@ ; LMULMAX1-RV32-NEXT: vdiv.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: sdiv_v4i64: @@ -3390,6 +3591,7 @@ ; LMULMAX1-RV64-NEXT: vdiv.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -3407,6 +3609,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vrem.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: srem_v32i8: @@ -3422,6 +3625,7 @@ ; LMULMAX1-RV32-NEXT: vrem.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: srem_v32i8: @@ -3437,6 +3641,7 @@ ; LMULMAX1-RV64-NEXT: vrem.vv v8, 
v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -3453,6 +3658,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vrem.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: srem_v16i16: @@ -3468,6 +3674,7 @@ ; LMULMAX1-RV32-NEXT: vrem.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: srem_v16i16: @@ -3483,6 +3690,7 @@ ; LMULMAX1-RV64-NEXT: vrem.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -3499,6 +3707,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vrem.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: srem_v8i32: @@ -3514,6 +3723,7 @@ ; LMULMAX1-RV32-NEXT: vrem.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: srem_v8i32: @@ -3529,6 +3739,7 @@ ; LMULMAX1-RV64-NEXT: vrem.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -3545,6 +3756,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vrem.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: srem_v4i64: @@ -3560,6 +3772,7 @@ ; LMULMAX1-RV32-NEXT: vrem.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: srem_v4i64: @@ -3575,6 +3788,7 @@ ; LMULMAX1-RV64-NEXT: vrem.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -3592,6 +3806,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vdivu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: udiv_v32i8: @@ -3607,6 +3822,7 @@ ; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: udiv_v32i8: @@ -3622,6 +3838,7 @@ ; LMULMAX1-RV64-NEXT: vdivu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -3638,6 +3855,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vdivu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: udiv_v16i16: @@ -3653,6 +3871,7 @@ ; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, 
v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: udiv_v16i16: @@ -3668,6 +3887,7 @@ ; LMULMAX1-RV64-NEXT: vdivu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -3684,6 +3904,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vdivu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: udiv_v8i32: @@ -3699,6 +3920,7 @@ ; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: udiv_v8i32: @@ -3714,6 +3936,7 @@ ; LMULMAX1-RV64-NEXT: vdivu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -3730,6 +3953,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vdivu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: udiv_v4i64: @@ -3745,6 +3969,7 @@ ; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: udiv_v4i64: @@ -3760,6 +3985,7 @@ ; LMULMAX1-RV64-NEXT: vdivu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -3777,6 +4003,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vremu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: urem_v32i8: @@ -3792,6 +4019,7 @@ ; LMULMAX1-RV32-NEXT: vremu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: urem_v32i8: @@ -3807,6 +4035,7 @@ ; LMULMAX1-RV64-NEXT: vremu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -3823,6 +4052,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vremu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: urem_v16i16: @@ -3838,6 +4068,7 @@ ; LMULMAX1-RV32-NEXT: vremu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: urem_v16i16: @@ -3853,6 +4084,7 @@ ; LMULMAX1-RV64-NEXT: vremu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -3869,6 +4101,7 @@ ; 
LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vremu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: urem_v8i32: @@ -3884,6 +4117,7 @@ ; LMULMAX1-RV32-NEXT: vremu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: urem_v8i32: @@ -3899,6 +4133,7 @@ ; LMULMAX1-RV64-NEXT: vremu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -3915,6 +4150,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vremu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: urem_v4i64: @@ -3930,6 +4166,7 @@ ; LMULMAX1-RV32-NEXT: vremu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: urem_v4i64: @@ -3945,6 +4182,7 @@ ; LMULMAX1-RV64-NEXT: vremu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -3961,6 +4199,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v4i64: @@ -3976,6 +4215,7 @@ ; LMULMAX1-NEXT: vadd.vv v8, v8, v10 ; LMULMAX1-NEXT: vse64.v v8, (a0) ; LMULMAX1-NEXT: vse64.v v9, (a2) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -4035,6 +4275,7 @@ ; LMULMAX2-RV32-NEXT: vmerge.vim v10, v10, 2, v0 ; LMULMAX2-RV32-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vse8.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: mulhu_v32i8: @@ -4085,6 +4326,7 @@ ; LMULMAX2-RV64-NEXT: vmerge.vim v10, v10, 2, v0 ; LMULMAX2-RV64-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vse8.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-LABEL: mulhu_v32i8: @@ -4100,6 +4342,7 @@ ; LMULMAX1-NEXT: vdivu.vv v9, v10, v9 ; LMULMAX1-NEXT: vse8.v v9, (a0) ; LMULMAX1-NEXT: vse8.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = udiv <32 x i8> %a, @@ -4145,6 +4388,7 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v10, v8 ; LMULMAX2-RV32-NEXT: vsrl.vv v8, v8, v12 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: mulhu_v16i16: @@ -4184,6 +4428,7 @@ ; LMULMAX2-RV64-NEXT: vadd.vv v8, v10, v8 ; LMULMAX2-RV64-NEXT: vsrl.vv v8, v8, v12 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-LABEL: mulhu_v16i16: @@ -4199,6 +4444,7 @@ ; LMULMAX1-NEXT: vdivu.vv v9, v10, v9 ; LMULMAX1-NEXT: vse16.v v9, (a0) ; LMULMAX1-NEXT: vse16.v v8, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = udiv <16 x i16> %a, @@ -4233,6 +4479,7 @@ ; LMULMAX2-NEXT: vmerge.vim v10, 
v10, 1, v0 ; LMULMAX2-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: mulhu_v8i32: @@ -4268,6 +4515,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vv v8, v8, v13 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: mulhu_v8i32: @@ -4283,6 +4531,7 @@ ; LMULMAX1-RV64-NEXT: vdivu.vv v9, v10, v9 ; LMULMAX1-RV64-NEXT: vse32.v v9, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v8, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = udiv <8 x i32> %a, @@ -4318,6 +4567,7 @@ ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: mulhu_v4i64: @@ -4343,6 +4593,7 @@ ; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vsrl.vv v8, v8, v14 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: mulhu_v4i64: @@ -4365,6 +4616,7 @@ ; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: mulhu_v4i64: @@ -4431,6 +4683,7 @@ ; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = udiv <4 x i64> %a, @@ -4458,6 +4711,7 @@ ; LMULMAX2-RV32-NEXT: vmerge.vim v10, v10, 1, v0 ; LMULMAX2-RV32-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vse8.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: mulhs_v32i8: @@ -4479,6 +4733,7 @@ ; LMULMAX2-RV64-NEXT: vmerge.vim v10, v10, 1, v0 ; LMULMAX2-RV64-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vse8.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: mulhs_v32i8: @@ -4498,6 +4753,7 @@ ; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: mulhs_v32i8: @@ -4517,6 +4773,7 @@ ; LMULMAX1-RV64-NEXT: vdivu.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = udiv <32 x i8> %a, @@ -4545,6 +4802,7 @@ ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 15 ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: mulhs_v16i16: @@ -4567,6 +4825,7 @@ ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 15 ; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vse16.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-LABEL: mulhs_v16i16: @@ -4585,6 +4844,7 @@ ; LMULMAX1-NEXT: vdiv.vv v8, v8, v10 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: vse16.v v9, (a1) +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = sdiv <16 x i16> %a, @@ 
-4612,6 +4872,7 @@ ; LMULMAX2-RV32-NEXT: vsra.vi v8, v8, 1 ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: mulhs_v8i32: @@ -4634,6 +4895,7 @@ ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 31 ; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: mulhs_v8i32: @@ -4662,6 +4924,7 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: mulhs_v8i32: @@ -4680,6 +4943,7 @@ ; LMULMAX1-RV64-NEXT: vdiv.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = sdiv <8 x i32> %a, @@ -4723,6 +4987,7 @@ ; LMULMAX2-RV32-NEXT: vsra.vv v10, v12, v10 ; LMULMAX2-RV32-NEXT: vadd.vv v8, v10, v8 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: mulhs_v4i64: @@ -4755,6 +5020,7 @@ ; LMULMAX2-RV64-NEXT: vsra.vv v10, v12, v10 ; LMULMAX2-RV64-NEXT: vadd.vv v8, v10, v8 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) +; LMULMAX2-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV32-LABEL: mulhs_v4i64: @@ -4772,6 +5038,7 @@ ; LMULMAX1-RV32-NEXT: vdiv.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: mulhs_v4i64: @@ -4808,6 +5075,7 @@ ; LMULMAX1-RV64-NEXT: vadd.vv v8, v10, v8 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a1) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = sdiv <4 x i64> %a, @@ -4824,6 +5092,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vmin.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: smin_v32i8: @@ -4839,6 +5108,7 @@ ; LMULMAX1-RV32-NEXT: vmin.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: smin_v32i8: @@ -4854,6 +5124,7 @@ ; LMULMAX1-RV64-NEXT: vmin.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -4871,6 +5142,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vmin.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: smin_v16i16: @@ -4886,6 +5158,7 @@ ; LMULMAX1-RV32-NEXT: vmin.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: smin_v16i16: @@ -4901,6 +5174,7 @@ ; LMULMAX1-RV64-NEXT: vmin.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load 
<16 x i16>, <16 x i16>* %y @@ -4918,6 +5192,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vmin.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: smin_v8i32: @@ -4933,6 +5208,7 @@ ; LMULMAX1-RV32-NEXT: vmin.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: smin_v8i32: @@ -4948,6 +5224,7 @@ ; LMULMAX1-RV64-NEXT: vmin.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -4965,6 +5242,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vmin.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: smin_v4i64: @@ -4980,6 +5258,7 @@ ; LMULMAX1-RV32-NEXT: vmin.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: smin_v4i64: @@ -4995,6 +5274,7 @@ ; LMULMAX1-RV64-NEXT: vmin.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -5013,6 +5293,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: smax_v32i8: @@ -5028,6 +5309,7 @@ ; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: smax_v32i8: @@ -5043,6 +5325,7 @@ ; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -5060,6 +5343,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: smax_v16i16: @@ -5075,6 +5359,7 @@ ; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: smax_v16i16: @@ -5090,6 +5375,7 @@ ; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -5107,6 +5393,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: smax_v8i32: @@ -5122,6 +5409,7 @@ ; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: smax_v8i32: @@ -5137,6 
+5425,7 @@ ; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -5154,6 +5443,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: smax_v4i64: @@ -5169,6 +5459,7 @@ ; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: smax_v4i64: @@ -5184,6 +5475,7 @@ ; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -5202,6 +5494,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vminu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: umin_v32i8: @@ -5217,6 +5510,7 @@ ; LMULMAX1-RV32-NEXT: vminu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: umin_v32i8: @@ -5232,6 +5526,7 @@ ; LMULMAX1-RV64-NEXT: vminu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -5249,6 +5544,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vminu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: umin_v16i16: @@ -5264,6 +5560,7 @@ ; LMULMAX1-RV32-NEXT: vminu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: umin_v16i16: @@ -5279,6 +5576,7 @@ ; LMULMAX1-RV64-NEXT: vminu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -5296,6 +5594,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vminu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: umin_v8i32: @@ -5311,6 +5610,7 @@ ; LMULMAX1-RV32-NEXT: vminu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: umin_v8i32: @@ -5326,6 +5626,7 @@ ; LMULMAX1-RV64-NEXT: vminu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -5343,6 +5644,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vminu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: umin_v4i64: @@ -5358,6 
+5660,7 @@ ; LMULMAX1-RV32-NEXT: vminu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: umin_v4i64: @@ -5373,6 +5676,7 @@ ; LMULMAX1-RV64-NEXT: vminu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x i64>, <4 x i64>* %y @@ -5391,6 +5695,7 @@ ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse8.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: umax_v32i8: @@ -5406,6 +5711,7 @@ ; LMULMAX1-RV32-NEXT: vmaxu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: umax_v32i8: @@ -5421,6 +5727,7 @@ ; LMULMAX1-RV64-NEXT: vmaxu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse8.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse8.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -5438,6 +5745,7 @@ ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse16.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: umax_v16i16: @@ -5453,6 +5761,7 @@ ; LMULMAX1-RV32-NEXT: vmaxu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: umax_v16i16: @@ -5468,6 +5777,7 @@ ; LMULMAX1-RV64-NEXT: vmaxu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse16.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -5485,6 +5795,7 @@ ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse32.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: umax_v8i32: @@ -5500,6 +5811,7 @@ ; LMULMAX1-RV32-NEXT: vmaxu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: umax_v8i32: @@ -5515,6 +5827,7 @@ ; LMULMAX1-RV64-NEXT: vmaxu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse32.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -5532,6 +5845,7 @@ ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10 ; LMULMAX2-NEXT: vse64.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: umax_v4i64: @@ -5547,6 +5861,7 @@ ; LMULMAX1-RV32-NEXT: vmaxu.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: umax_v4i64: @@ -5562,6 +5877,7 @@ ; LMULMAX1-RV64-NEXT: vmaxu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <4 x i64>, <4 x i64>* %x %b = load <4 x 
i64>, <4 x i64>* %y @@ -5578,6 +5894,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 -1, i32 0 @@ -5594,6 +5911,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 -1, i32 0 @@ -5610,6 +5928,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 -1, i32 0 @@ -5626,6 +5945,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 -1, i32 0 @@ -5642,6 +5962,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 1, i32 0 @@ -5658,6 +5979,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, 1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 1, i32 0 @@ -5674,6 +5996,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, 1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 1, i32 0 @@ -5690,6 +6013,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 1, i32 0 @@ -5706,6 +6030,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -5722,6 +6047,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -5738,6 +6064,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -5754,6 +6081,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -5770,6 +6098,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -5786,6 +6115,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = 
insertelement <4 x i32> undef, i32 %y, i32 0 @@ -5803,6 +6133,7 @@ ; CHECK-NEXT: addi a1, zero, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 -1, i32 0 @@ -5820,6 +6151,7 @@ ; CHECK-NEXT: addi a1, zero, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 -1, i32 0 @@ -5837,6 +6169,7 @@ ; CHECK-NEXT: addi a1, zero, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 -1, i32 0 @@ -5854,6 +6187,7 @@ ; CHECK-NEXT: addi a1, zero, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 -1, i32 0 @@ -5870,6 +6204,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vrsub.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 1, i32 0 @@ -5886,6 +6221,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vrsub.vi v8, v8, 1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 1, i32 0 @@ -5902,6 +6238,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vrsub.vi v8, v8, 1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 1, i32 0 @@ -5918,6 +6255,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vrsub.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 1, i32 0 @@ -5934,6 +6272,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -5950,6 +6289,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -5966,6 +6306,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -5982,6 +6323,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vrsub.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -5998,6 +6340,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vrsub.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -6014,6 +6357,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vrsub.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = 
load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -6030,6 +6374,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -6046,6 +6391,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -6062,6 +6408,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -6078,6 +6425,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -6094,6 +6442,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -6110,6 +6459,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -6126,6 +6476,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, -2 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 -2, i32 0 @@ -6142,6 +6493,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, -2 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 -2, i32 0 @@ -6158,6 +6510,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, -2 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 -2, i32 0 @@ -6174,6 +6527,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, -2 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 -2, i32 0 @@ -6190,6 +6544,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 1, i32 0 @@ -6206,6 +6561,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 1, i32 0 @@ -6222,6 +6578,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 1, i32 0 @@ -6238,6 +6595,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 1, i32 0 @@ -6254,6 +6612,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -6270,6 +6629,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -6286,6 +6646,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -6302,6 +6663,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -6318,6 +6680,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -6334,6 +6697,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -6350,6 +6714,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, -2 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 -2, i32 0 @@ -6366,6 +6731,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, -2 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 -2, i32 0 @@ -6382,6 +6748,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, -2 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 -2, i32 0 @@ -6398,6 +6765,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, -2 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 -2, i32 0 @@ -6414,6 +6782,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 1, i32 0 @@ -6430,6 +6799,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, 1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 1, i32 0 @@ -6446,6 +6816,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, 1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 1, i32 0 @@ -6462,6 +6833,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 1, i32 0 @@ -6478,6 +6850,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -6494,6 +6867,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -6510,6 +6884,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -6526,6 +6901,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -6542,6 +6918,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -6558,6 +6935,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -6574,6 +6952,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 -1, i32 0 @@ -6590,6 +6969,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 -1, i32 0 @@ -6606,6 +6986,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 -1, i32 0 @@ -6622,6 +7003,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, -1 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 -1, i32 0 @@ -6638,6 +7020,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 1, i32 0 @@ -6654,6 +7037,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, 1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 1, i32 0 @@ -6670,6 +7054,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, 1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 1, i32 0 @@ -6686,6 +7071,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, 
(a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 1, i32 0 @@ -6702,6 +7088,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -6718,6 +7105,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -6734,6 +7122,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -6750,6 +7139,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -6766,6 +7156,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -6782,6 +7173,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -6798,6 +7190,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsrl.vi v8, v8, 7 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 7, i32 0 @@ -6814,6 +7207,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsrl.vi v8, v8, 15 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 15, i32 0 @@ -6830,6 +7224,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 31, i32 0 @@ -6846,6 +7241,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 31, i32 0 @@ -6862,6 +7258,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -6878,6 +7275,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -6894,6 +7292,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -6910,6 +7309,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsra.vi v8, v8, 7 
; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 7, i32 0 @@ -6926,6 +7326,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsra.vi v8, v8, 15 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 15, i32 0 @@ -6942,6 +7343,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 31, i32 0 @@ -6958,6 +7360,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 31, i32 0 @@ -6974,6 +7377,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsra.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -6990,6 +7394,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsra.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -7006,6 +7411,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsra.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -7022,6 +7428,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsll.vi v8, v8, 7 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 7, i32 0 @@ -7038,6 +7445,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsll.vi v8, v8, 15 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 15, i32 0 @@ -7054,6 +7462,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 31, i32 0 @@ -7070,6 +7479,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = insertelement <2 x i64> undef, i64 31, i32 0 @@ -7086,6 +7496,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsll.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -7102,6 +7513,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsll.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -7118,6 +7530,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsll.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -7134,6 +7547,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; 
CHECK-NEXT: vdiv.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -7150,6 +7564,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vdiv.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -7166,6 +7581,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vdiv.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -7182,6 +7598,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vrem.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -7198,6 +7615,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vrem.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -7214,6 +7632,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vrem.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -7230,6 +7649,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vdivu.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -7246,6 +7666,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vdivu.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -7262,6 +7683,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vdivu.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -7278,6 +7700,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vremu.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -7294,6 +7717,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vremu.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -7310,6 +7734,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vremu.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i32 0 @@ -7328,6 +7753,7 @@ ; CHECK-NEXT: vmulhu.vx v8, v8, a1 ; CHECK-NEXT: vsrl.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = udiv <16 x i8> %a, @@ -7348,6 +7774,7 @@ ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhu_vx_v8i16: @@ -7362,6 +7789,7 @@ ; RV64-NEXT: vadd.vv v8, v8, v9 ; RV64-NEXT: vsrl.vi v8, v8, 2 ; RV64-NEXT: 
vse16.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = udiv <8 x i16> %a, @@ -7379,6 +7807,7 @@ ; RV32-NEXT: vmulhu.vx v8, v8, a1 ; RV32-NEXT: vsrl.vi v8, v8, 2 ; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhu_vx_v4i32: @@ -7390,6 +7819,7 @@ ; RV64-NEXT: vmulhu.vx v8, v8, a1 ; RV64-NEXT: vsrl.vi v8, v8, 2 ; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = udiv <4 x i32> %a, @@ -7415,6 +7845,7 @@ ; RV32-NEXT: vsrl.vi v8, v8, 1 ; RV32-NEXT: vse64.v v8, (a0) ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhu_vx_v2i64: @@ -7432,6 +7863,7 @@ ; RV64-NEXT: vmulhu.vx v8, v8, a1 ; RV64-NEXT: vsrl.vi v8, v8, 1 ; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = udiv <2 x i64> %a, @@ -7448,6 +7880,7 @@ ; CHECK-NEXT: vmulhu.vx v8, v8, a1 ; CHECK-NEXT: vsrl.vi v8, v8, 7 ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = udiv <16 x i8> %a, @@ -7467,6 +7900,7 @@ ; RV32-NEXT: vsrl.vi v9, v8, 15 ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhs_vx_v8i16: @@ -7480,6 +7914,7 @@ ; RV64-NEXT: vsrl.vi v9, v8, 15 ; RV64-NEXT: vadd.vv v8, v8, v9 ; RV64-NEXT: vse16.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = sdiv <8 x i16> %a, @@ -7499,6 +7934,7 @@ ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhs_vx_v4i32: @@ -7512,6 +7948,7 @@ ; RV64-NEXT: vsrl.vi v9, v8, 31 ; RV64-NEXT: vadd.vv v8, v8, v9 ; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = sdiv <4 x i32> %a, @@ -7539,6 +7976,7 @@ ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: vse64.v v8, (a0) ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mulhs_vx_v2i64: @@ -7558,6 +7996,7 @@ ; RV64-NEXT: vsrl.vx v9, v8, a1 ; RV64-NEXT: vadd.vv v8, v8, v9 ; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <2 x i64>, <2 x i64>* %x %b = sdiv <2 x i64> %a, diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll @@ -15,6 +15,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %1 = insertelement <1 x i1> undef, i1 %x, i32 0 ret <1 x i1> %1 @@ -27,6 +28,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %1 = insertelement <1 x i1> undef, i1 %x, i32 0 ret <1 x i1> %1 @@ -42,6 +44,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %1 = insertelement <2 x i1> undef, i1 %x, i32 0 %2 = insertelement <2 x i1> %1, i1 %y, i32 1 @@ -62,6 +65,7 
@@ ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %1 = insertelement <2 x i1> undef, i1 %x, i32 0 %2 = insertelement <2 x i1> %1, i1 %y, i32 1 @@ -74,6 +78,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ret <3 x i1> } @@ -84,6 +89,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ret <3 x i1> } @@ -94,6 +100,7 @@ ; CHECK-NEXT: addi a0, zero, 6 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ret <4 x i1> } @@ -109,6 +116,7 @@ ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %1 = insertelement <4 x i1> undef, i1 %x, i32 0 %2 = insertelement <4 x i1> %1, i1 %x, i32 1 @@ -133,6 +141,7 @@ ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %1 = insertelement <4 x i1> undef, i1 %x, i32 0 %2 = insertelement <4 x i1> %1, i1 %x, i32 1 @@ -157,6 +166,7 @@ ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %1 = insertelement <4 x i1> undef, i1 0, i32 0 %2 = insertelement <4 x i1> %1, i1 %x, i32 1 @@ -171,6 +181,7 @@ ; CHECK-NEXT: addi a0, zero, 182 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ret <8 x i1> } @@ -186,6 +197,7 @@ ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %1 = insertelement <8 x i1> undef, i1 %x, i32 0 %2 = insertelement <8 x i1> %1, i1 %x, i32 1 @@ -218,6 +230,7 @@ ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %1 = insertelement <8 x i1> undef, i1 %x, i32 0 %2 = insertelement <8 x i1> %1, i1 %x, i32 1 @@ -250,6 +263,7 @@ ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %1 = insertelement <8 x i1> undef, i1 %x, i32 0 %2 = insertelement <8 x i1> %1, i1 %x, i32 1 @@ -281,6 +295,7 @@ ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %1 = insertelement <8 x i1> undef, i1 %x, i32 0 %2 = insertelement <8 x i1> %1, i1 %x, i32 1 @@ -299,6 +314,7 @@ ; CHECK-NEXT: addi a0, zero, 949 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ret <10 x i1> } @@ -310,6 +326,7 @@ ; CHECK-RV32-NEXT: addi a0, a0, 1718 ; CHECK-RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-RV32-NEXT: vmv.s.x v0, a0 +; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: buildvec_mask_v16i1: @@ -318,6 +335,7 @@ ; CHECK-RV64-NEXT: addiw a0, a0, 1718 ; CHECK-RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-RV64-NEXT: vmv.s.x v0, a0 +; CHECK-RV64-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-RV64-NEXT: ret ret <16 x i1> } @@ -328,6 +346,7 @@ ; CHECK-NEXT: addi a0, zero, 1722 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ret <16 x i1> } @@ -341,6 +360,7 @@ ; RV32-LMULMAX1-NEXT: lui a0, 11 ; RV32-LMULMAX1-NEXT: addi a0, a0, 1718 ; RV32-LMULMAX1-NEXT: vmv.s.x v8, a0 +; RV32-LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX1-NEXT: ret ; ; RV64-LMULMAX1-LABEL: buildvec_mask_v32i1: @@ -351,6 +371,7 @@ ; RV64-LMULMAX1-NEXT: lui a0, 11 ; RV64-LMULMAX1-NEXT: addiw a0, a0, 1718 ; RV64-LMULMAX1-NEXT: vmv.s.x v8, a0 +; RV64-LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX1-NEXT: ret ; ; RV32-LMULMAX2-LABEL: buildvec_mask_v32i1: @@ -359,6 +380,7 @@ ; RV32-LMULMAX2-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-LMULMAX2-NEXT: vmv.s.x v0, a0 +; RV32-LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX2-NEXT: ret ; ; RV64-LMULMAX2-LABEL: buildvec_mask_v32i1: @@ -367,6 +389,7 @@ ; RV64-LMULMAX2-NEXT: addiw a0, a0, 1776 ; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-LMULMAX2-NEXT: vmv.s.x v0, a0 +; RV64-LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX2-NEXT: ret ; ; RV32-LMULMAX4-LABEL: buildvec_mask_v32i1: @@ -375,6 +398,7 @@ ; RV32-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX4-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0 +; RV32-LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX4-NEXT: ret ; ; RV64-LMULMAX4-LABEL: buildvec_mask_v32i1: @@ -383,6 +407,7 @@ ; RV64-LMULMAX4-NEXT: addiw a0, a0, 1776 ; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-LMULMAX4-NEXT: vmv.s.x v0, a0 +; RV64-LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX4-NEXT: ret ; ; RV32-LMULMAX8-LABEL: buildvec_mask_v32i1: @@ -391,6 +416,7 @@ ; RV32-LMULMAX8-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX8-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0 +; RV32-LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX8-NEXT: ret ; ; RV64-LMULMAX8-LABEL: buildvec_mask_v32i1: @@ -399,6 +425,7 @@ ; RV64-LMULMAX8-NEXT: addiw a0, a0, 1776 ; RV64-LMULMAX8-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-LMULMAX8-NEXT: vmv.s.x v0, a0 +; RV64-LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX8-NEXT: ret ret <32 x i1> } @@ -416,6 +443,7 @@ ; RV32-LMULMAX1-NEXT: addi a0, a0, 1718 ; RV32-LMULMAX1-NEXT: vmv.s.x v8, a0 ; RV32-LMULMAX1-NEXT: vmv1r.v v10, v8 +; RV32-LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX1-NEXT: ret ; ; RV64-LMULMAX1-LABEL: buildvec_mask_v64i1: @@ -430,6 +458,7 @@ ; RV64-LMULMAX1-NEXT: addiw a0, a0, 1718 ; RV64-LMULMAX1-NEXT: vmv.s.x v8, a0 ; RV64-LMULMAX1-NEXT: vmv1r.v v10, v8 +; RV64-LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX1-NEXT: ret ; ; RV32-LMULMAX2-LABEL: buildvec_mask_v64i1: @@ -441,6 +470,7 @@ ; RV32-LMULMAX2-NEXT: lui a0, 748388 ; RV32-LMULMAX2-NEXT: addi a0, a0, -1793 ; RV32-LMULMAX2-NEXT: vmv.s.x v8, a0 +; RV32-LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX2-NEXT: ret ; ; RV64-LMULMAX2-LABEL: buildvec_mask_v64i1: @@ -452,6 +482,7 @@ ; RV64-LMULMAX2-NEXT: lui a0, 748388 ; RV64-LMULMAX2-NEXT: addiw a0, a0, -1793 ; RV64-LMULMAX2-NEXT: vmv.s.x v8, a0 +; RV64-LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX2-NEXT: ret ; ; RV32-LMULMAX4-LABEL: buildvec_mask_v64i1: @@ -465,6 +496,7 @@ ; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-LMULMAX4-NEXT: vslideup.vi v0, v8, 1 +; RV32-LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; 
RV32-LMULMAX4-NEXT: ret ; ; RV64-LMULMAX4-LABEL: buildvec_mask_v64i1: @@ -479,6 +511,7 @@ ; RV64-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-LMULMAX4-NEXT: vmv.s.x v0, a0 +; RV64-LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX4-NEXT: ret ; ; RV32-LMULMAX8-LABEL: buildvec_mask_v64i1: @@ -492,6 +525,7 @@ ; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1 +; RV32-LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX8-NEXT: ret ; ; RV64-LMULMAX8-LABEL: buildvec_mask_v64i1: @@ -506,6 +540,7 @@ ; RV64-LMULMAX8-NEXT: addi a0, a0, 1776 ; RV64-LMULMAX8-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-LMULMAX8-NEXT: vmv.s.x v0, a0 +; RV64-LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX8-NEXT: ret ret <64 x i1> } @@ -531,6 +566,7 @@ ; RV32-LMULMAX1-NEXT: vmv1r.v v10, v8 ; RV32-LMULMAX1-NEXT: vmv1r.v v11, v0 ; RV32-LMULMAX1-NEXT: vmv1r.v v13, v9 +; RV32-LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX1-NEXT: ret ; ; RV64-LMULMAX1-LABEL: buildvec_mask_v128i1: @@ -553,6 +589,7 @@ ; RV64-LMULMAX1-NEXT: vmv1r.v v10, v8 ; RV64-LMULMAX1-NEXT: vmv1r.v v11, v0 ; RV64-LMULMAX1-NEXT: vmv1r.v v13, v9 +; RV64-LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX1-NEXT: ret ; ; RV32-LMULMAX2-LABEL: buildvec_mask_v128i1: @@ -570,6 +607,7 @@ ; RV32-LMULMAX2-NEXT: lui a0, 945060 ; RV32-LMULMAX2-NEXT: addi a0, a0, -1793 ; RV32-LMULMAX2-NEXT: vmv.s.x v10, a0 +; RV32-LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX2-NEXT: ret ; ; RV64-LMULMAX2-LABEL: buildvec_mask_v128i1: @@ -587,6 +625,7 @@ ; RV64-LMULMAX2-NEXT: lui a0, 945060 ; RV64-LMULMAX2-NEXT: addiw a0, a0, -1793 ; RV64-LMULMAX2-NEXT: vmv.s.x v10, a0 +; RV64-LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX2-NEXT: ret ; ; RV32-LMULMAX4-LABEL: buildvec_mask_v128i1: @@ -609,6 +648,7 @@ ; RV32-LMULMAX4-NEXT: vmv.s.x v8, a0 ; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu ; RV32-LMULMAX4-NEXT: vslideup.vi v8, v9, 1 +; RV32-LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX4-NEXT: ret ; ; RV64-LMULMAX4-LABEL: buildvec_mask_v128i1: @@ -630,6 +670,7 @@ ; RV64-LMULMAX4-NEXT: slli a0, a0, 17 ; RV64-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV64-LMULMAX4-NEXT: vmv.s.x v0, a0 +; RV64-LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX4-NEXT: ret ; ; RV32-LMULMAX8-LABEL: buildvec_mask_v128i1: @@ -655,6 +696,7 @@ ; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0 ; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 3 +; RV32-LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX8-NEXT: ret ; ; RV64-LMULMAX8-LABEL: buildvec_mask_v128i1: @@ -678,6 +720,7 @@ ; RV64-LMULMAX8-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX8-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; RV64-LMULMAX8-NEXT: vslideup.vi v0, v8, 1 +; RV64-LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX8-NEXT: ret ret <128 x i1> } @@ -703,6 +746,7 @@ ; RV32-LMULMAX1-NEXT: vmv1r.v v10, v8 ; RV32-LMULMAX1-NEXT: vmv1r.v v11, v0 ; RV32-LMULMAX1-NEXT: vmv1r.v v13, v9 +; RV32-LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX1-NEXT: ret ; ; RV64-LMULMAX1-LABEL: buildvec_mask_optsize_v128i1: @@ -725,6 +769,7 @@ ; RV64-LMULMAX1-NEXT: vmv1r.v v10, v8 ; RV64-LMULMAX1-NEXT: vmv1r.v v11, v0 ; RV64-LMULMAX1-NEXT: vmv1r.v v13, v9 +; RV64-LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX1-NEXT: ret ; ; RV32-LMULMAX2-LABEL: buildvec_mask_optsize_v128i1: @@ -742,6 +787,7 @@ ; RV32-LMULMAX2-NEXT: lui a0, 945060 ; RV32-LMULMAX2-NEXT: addi a0, a0, -1793 ; 
RV32-LMULMAX2-NEXT: vmv.s.x v10, a0 +; RV32-LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX2-NEXT: ret ; ; RV64-LMULMAX2-LABEL: buildvec_mask_optsize_v128i1: @@ -759,6 +805,7 @@ ; RV64-LMULMAX2-NEXT: lui a0, 945060 ; RV64-LMULMAX2-NEXT: addiw a0, a0, -1793 ; RV64-LMULMAX2-NEXT: vmv.s.x v10, a0 +; RV64-LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX2-NEXT: ret ; ; RV32-LMULMAX4-LABEL: buildvec_mask_optsize_v128i1: @@ -771,6 +818,7 @@ ; RV32-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_1) ; RV32-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_1) ; RV32-LMULMAX4-NEXT: vlm.v v8, (a0) +; RV32-LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX4-NEXT: ret ; ; RV64-LMULMAX4-LABEL: buildvec_mask_optsize_v128i1: @@ -792,6 +840,7 @@ ; RV64-LMULMAX4-NEXT: slli a0, a0, 17 ; RV64-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV64-LMULMAX4-NEXT: vmv.s.x v0, a0 +; RV64-LMULMAX4-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX4-NEXT: ret ; ; RV32-LMULMAX8-LABEL: buildvec_mask_optsize_v128i1: @@ -801,6 +850,7 @@ ; RV32-LMULMAX8-NEXT: addi a1, zero, 128 ; RV32-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; RV32-LMULMAX8-NEXT: vlm.v v0, (a0) +; RV32-LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; RV32-LMULMAX8-NEXT: ret ; ; RV64-LMULMAX8-LABEL: buildvec_mask_optsize_v128i1: @@ -810,6 +860,7 @@ ; RV64-LMULMAX8-NEXT: addi a1, zero, 128 ; RV64-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; RV64-LMULMAX8-NEXT: vlm.v v0, (a0) +; RV64-LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; RV64-LMULMAX8-NEXT: ret ret <128 x i1> } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll @@ -18,6 +18,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <1 x i1>, <1 x i1>* %x store <1 x i1> %a, <1 x i1>* %y @@ -38,6 +39,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i1>, <2 x i1>* %x store <2 x i1> %a, <2 x i1>* %y @@ -58,6 +60,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i1>, <4 x i1>* %x store <4 x i1> %a, <4 x i1>* %y @@ -70,6 +73,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i1>, <8 x i1>* %x store <8 x i1> %a, <8 x i1>* %y @@ -82,6 +86,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a1) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i1>, <16 x i1>* %x store <16 x i1> %a, <16 x i1>* %y @@ -95,18 +100,21 @@ ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vlm.v v8, (a0) ; LMULMAX2-NEXT: vsm.v v8, (a1) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: load_store_v32i1: ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: lw a0, 0(a0) ; LMULMAX1-RV32-NEXT: sw a0, 0(a1) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: load_store_v32i1: ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: lw a0, 0(a0) ; LMULMAX1-RV64-NEXT: sw a0, 0(a1) +; 
LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = load <32 x i1>, <32 x i1>* %x store <32 x i1> %a, <32 x i1>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmand.mm v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i1>, <8 x i1>* %x %b = load <8 x i1>, <8 x i1>* %y @@ -28,6 +29,7 @@ ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmor.mm v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i1>, <16 x i1>* %x %b = load <16 x i1>, <16 x i1>* %y @@ -45,6 +47,7 @@ ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmxor.mm v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i1>, <32 x i1>* %x %b = load <32 x i1>, <32 x i1>* %y @@ -61,6 +64,7 @@ ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vmnand.mm v8, v8, v8 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %b = load <64 x i1>, <64 x i1>* %y @@ -77,6 +81,7 @@ ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmandnot.mm v8, v9, v8 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i1>, <8 x i1>* %x %b = load <8 x i1>, <8 x i1>* %y @@ -94,6 +99,7 @@ ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmornot.mm v8, v9, v8 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i1>, <16 x i1>* %x %b = load <16 x i1>, <16 x i1>* %y @@ -112,6 +118,7 @@ ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmxnor.mm v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i1>, <32 x i1>* %x %b = load <32 x i1>, <32 x i1>* %y @@ -129,6 +136,7 @@ ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmnand.mm v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i1>, <8 x i1>* %x %b = load <8 x i1>, <8 x i1>* %y @@ -146,6 +154,7 @@ ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmnor.mm v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i1>, <16 x i1>* %x %b = load <16 x i1>, <16 x i1>* %y @@ -164,6 +173,7 @@ ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmxnor.mm v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i1>, <32 x i1>* %x %b = load <32 x i1>, <32 x i1>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll @@ -18,6 +18,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <1 x i1> , <1 x i1>* %x ret void @@ -37,6 +38,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <2 x i1> zeroinitializer, <2 x i1>* %x ret void @@ -58,6 +60,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; 
CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <1 x i1> undef, i1 %y, i32 0 %b = shufflevector <1 x i1> %a, <1 x i1> undef, <1 x i32> zeroinitializer @@ -82,6 +85,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = icmp eq i32 %y, %z %a = insertelement <1 x i1> undef, i1 %c, i32 0 @@ -104,6 +108,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <4 x i1> , <4 x i1>* %x ret void @@ -125,6 +130,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <4 x i1> undef, i1 %y, i32 0 %b = shufflevector <4 x i1> %a, <4 x i1> undef, <4 x i32> zeroinitializer @@ -138,6 +144,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmclr.m v8 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <8 x i1> zeroinitializer, <8 x i1>* %x ret void @@ -151,6 +158,7 @@ ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vmsne.vi v8, v8, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <8 x i1> undef, i1 %y, i32 0 %b = shufflevector <8 x i1> %a, <8 x i1> undef, <8 x i32> zeroinitializer @@ -164,6 +172,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vmset.m v8 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret store <16 x i1> , <16 x i1>* %x ret void @@ -177,6 +186,7 @@ ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vmsne.vi v8, v8, 0 ; CHECK-NEXT: vsm.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <16 x i1> undef, i1 %y, i32 0 %b = shufflevector <16 x i1> %a, <16 x i1> undef, <16 x i32> zeroinitializer @@ -191,6 +201,7 @@ ; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-NEXT: vmclr.m v8 ; LMULMAX2-NEXT: vsm.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_zeros_v32i1: @@ -200,6 +211,7 @@ ; LMULMAX1-RV32-NEXT: vsm.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a0, a0, 2 ; LMULMAX1-RV32-NEXT: vsm.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_zeros_v32i1: @@ -209,6 +221,7 @@ ; LMULMAX1-RV64-NEXT: vsm.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a0, a0, 2 ; LMULMAX1-RV64-NEXT: vsm.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret store <32 x i1> zeroinitializer, <32 x i1>* %x ret void @@ -223,6 +236,7 @@ ; LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vmsne.vi v10, v8, 0 ; LMULMAX2-NEXT: vsm.v v10, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_v32i1: @@ -234,6 +248,7 @@ ; LMULMAX1-RV32-NEXT: addi a1, a0, 2 ; LMULMAX1-RV32-NEXT: vsm.v v8, (a1) ; LMULMAX1-RV32-NEXT: vsm.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_v32i1: @@ -245,6 +260,7 @@ ; LMULMAX1-RV64-NEXT: addi a1, a0, 2 ; LMULMAX1-RV64-NEXT: vsm.v v8, (a1) ; LMULMAX1-RV64-NEXT: vsm.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = insertelement <32 x i1> undef, i1 %y, i32 0 %b = shufflevector <32 x i1> %a, <32 x i1> undef, <32 x i32> zeroinitializer @@ 
-261,6 +277,7 @@ ; LMULMAX2-NEXT: vmset.m v8 ; LMULMAX2-NEXT: vsm.v v8, (a1) ; LMULMAX2-NEXT: vsm.v v8, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_ones_v64i1: @@ -274,6 +291,7 @@ ; LMULMAX1-RV32-NEXT: vsm.v v8, (a1) ; LMULMAX1-RV32-NEXT: addi a0, a0, 2 ; LMULMAX1-RV32-NEXT: vsm.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_ones_v64i1: @@ -287,6 +305,7 @@ ; LMULMAX1-RV64-NEXT: vsm.v v8, (a1) ; LMULMAX1-RV64-NEXT: addi a0, a0, 2 ; LMULMAX1-RV64-NEXT: vsm.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret store <64 x i1> , <64 x i1>* %x ret void @@ -303,6 +322,7 @@ ; LMULMAX2-NEXT: addi a1, a0, 4 ; LMULMAX2-NEXT: vsm.v v10, (a1) ; LMULMAX2-NEXT: vsm.v v10, (a0) +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_v64i1: @@ -318,6 +338,7 @@ ; LMULMAX1-RV32-NEXT: addi a1, a0, 2 ; LMULMAX1-RV32-NEXT: vsm.v v8, (a1) ; LMULMAX1-RV32-NEXT: vsm.v v8, (a0) +; LMULMAX1-RV32-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_v64i1: @@ -333,6 +354,7 @@ ; LMULMAX1-RV64-NEXT: addi a1, a0, 2 ; LMULMAX1-RV64-NEXT: vsm.v v8, (a1) ; LMULMAX1-RV64-NEXT: vsm.v v8, (a0) +; LMULMAX1-RV64-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-RV64-NEXT: ret %a = insertelement <64 x i1> undef, i1 %y, i32 0 %b = shufflevector <64 x i1> %a, <64 x i1> undef, <64 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -12,6 +12,7 @@ ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1i8: @@ -19,6 +20,7 @@ ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <1 x i8> @llvm.masked.gather.v1i8.v1p0i8(<1 x i8*> %ptrs, i32 1, <1 x i1> %m, <1 x i8> %passthru) ret <1 x i8> %v @@ -32,6 +34,7 @@ ; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i8: @@ -39,6 +42,7 @@ ; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, i32 1, <2 x i1> %m, <2 x i8> %passthru) ret <2 x i8> %v @@ -51,6 +55,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i8_sextload_v2i16: @@ -59,6 +64,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV64-NEXT: vsext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, i32 1, <2 x i1> %m, <2 x i8> %passthru) %ev = sext <2 x i8> %v to <2 x i16> @@ -72,6 +78,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 +; 
RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i8_zextload_v2i16: @@ -80,6 +87,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV64-NEXT: vzext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, i32 1, <2 x i1> %m, <2 x i8> %passthru) %ev = zext <2 x i8> %v to <2 x i16> @@ -93,6 +101,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-NEXT: vsext.vf4 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i8_sextload_v2i32: @@ -101,6 +110,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV64-NEXT: vsext.vf4 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, i32 1, <2 x i1> %m, <2 x i8> %passthru) %ev = sext <2 x i8> %v to <2 x i32> @@ -114,6 +124,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-NEXT: vzext.vf4 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i8_zextload_v2i32: @@ -122,6 +133,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV64-NEXT: vzext.vf4 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, i32 1, <2 x i1> %m, <2 x i8> %passthru) %ev = zext <2 x i8> %v to <2 x i32> @@ -135,6 +147,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vsext.vf8 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i8_sextload_v2i64: @@ -143,6 +156,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vsext.vf8 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, i32 1, <2 x i1> %m, <2 x i8> %passthru) %ev = sext <2 x i8> %v to <2 x i64> @@ -156,6 +170,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vzext.vf8 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i8_zextload_v2i64: @@ -164,6 +179,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vzext.vf8 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, i32 1, <2 x i1> %m, <2 x i8> %passthru) %ev = zext <2 x i8> %v to <2 x i64> @@ -178,6 +194,7 @@ ; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4i8: @@ -185,6 +202,7 @@ ; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> %m, <4 x i8> %passthru) ret <4 x i8> %v @@ -196,6 +214,7 @@ ; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_v4i8: @@ -203,6 +222,7 
@@ ; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -214,11 +234,13 @@ ; RV32-LABEL: mgather_falsemask_v4i8: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_v4i8: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> zeroinitializer, <4 x i8> %passthru) ret <4 x i8> %v @@ -232,6 +254,7 @@ ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8i8: @@ -239,6 +262,7 @@ ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> %m, <8 x i8> %passthru) ret <8 x i8> %v @@ -252,6 +276,7 @@ ; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8: @@ -261,6 +286,7 @@ ; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %idxs %v = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> %m, <8 x i8> %passthru) @@ -275,6 +301,7 @@ ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1i16: @@ -282,6 +309,7 @@ ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <1 x i16> @llvm.masked.gather.v1i16.v1p0i16(<1 x i16*> %ptrs, i32 2, <1 x i1> %m, <1 x i16> %passthru) ret <1 x i16> %v @@ -295,6 +323,7 @@ ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i16: @@ -302,6 +331,7 @@ ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, i32 2, <2 x i1> %m, <2 x i16> %passthru) ret <2 x i16> %v @@ -314,6 +344,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i16_sextload_v2i32: @@ -322,6 +353,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV64-NEXT: vsext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, i32 2, <2 x i1> %m, <2 x i16> %passthru) %ev = sext <2 x i16> %v 
to <2 x i32> @@ -335,6 +367,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i16_zextload_v2i32: @@ -343,6 +376,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV64-NEXT: vzext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, i32 2, <2 x i1> %m, <2 x i16> %passthru) %ev = zext <2 x i16> %v to <2 x i32> @@ -356,6 +390,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vsext.vf4 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i16_sextload_v2i64: @@ -364,6 +399,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vsext.vf4 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, i32 2, <2 x i1> %m, <2 x i16> %passthru) %ev = sext <2 x i16> %v to <2 x i64> @@ -377,6 +413,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vzext.vf4 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i16_zextload_v2i64: @@ -385,6 +422,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vzext.vf4 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, i32 2, <2 x i1> %m, <2 x i16> %passthru) %ev = zext <2 x i16> %v to <2 x i64> @@ -399,6 +437,7 @@ ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4i16: @@ -406,6 +445,7 @@ ; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> %m, <4 x i16> %passthru) ret <4 x i16> %v @@ -417,6 +457,7 @@ ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_v4i16: @@ -424,6 +465,7 @@ ; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -435,11 +477,13 @@ ; RV32-LABEL: mgather_falsemask_v4i16: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_v4i16: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> zeroinitializer, <4 x i16> %passthru) ret <4 x i16> %v @@ -453,6 +497,7 @@ ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8i16: 
@@ -460,6 +505,7 @@ ; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru) ret <8 x i16> %v @@ -474,6 +520,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8_v8i16: @@ -484,6 +531,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru) @@ -499,6 +547,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8i16: @@ -509,6 +558,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs @@ -525,6 +575,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8i16: @@ -535,6 +586,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs @@ -551,6 +603,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i16: @@ -561,6 +614,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru) @@ -575,6 +629,7 @@ ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1i32: @@ -582,6 +637,7 @@ ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <1 x i32> @llvm.masked.gather.v1i32.v1p0i32(<1 x i32*> %ptrs, i32 4, <1 x i1> %m, <1 x i32> %passthru) ret <1 x i32> %v @@ -595,6 +651,7 @@ ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i32: @@ -602,6 +659,7 @@ ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: 
.cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, i32 4, <2 x i1> %m, <2 x i32> %passthru) ret <2 x i32> %v @@ -614,6 +672,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i32_sextload_v2i64: @@ -622,6 +681,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vsext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, i32 4, <2 x i1> %m, <2 x i32> %passthru) %ev = sext <2 x i32> %v to <2 x i64> @@ -635,6 +695,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i32_zextload_v2i64: @@ -643,6 +704,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vzext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, i32 4, <2 x i1> %m, <2 x i32> %passthru) %ev = zext <2 x i32> %v to <2 x i64> @@ -657,6 +719,7 @@ ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4i32: @@ -664,6 +727,7 @@ ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> %m, <4 x i32> %passthru) ret <4 x i32> %v @@ -674,6 +738,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_v4i32: @@ -681,6 +746,7 @@ ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -692,11 +758,13 @@ ; RV32-LABEL: mgather_falsemask_v4i32: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_v4i32: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> zeroinitializer, <4 x i32> %passthru) ret <4 x i32> %v @@ -710,6 +778,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8i32: @@ -717,6 +786,7 @@ ; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) ret <8 x i32> %v @@ -730,6 +800,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 
+; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8_v8i32: @@ -740,6 +811,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) @@ -754,6 +826,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8i32: @@ -764,6 +837,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -779,6 +853,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8i32: @@ -789,6 +864,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -804,6 +880,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i16_v8i32: @@ -814,6 +891,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) @@ -828,6 +906,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i16_v8i32: @@ -838,6 +917,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -853,6 +933,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i16_v8i32: @@ -863,6 +944,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -877,6 +959,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i32: @@ -887,6 +970,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) @@ -901,6 +985,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1i64: @@ -908,6 +993,7 @@ ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <1 x i64> @llvm.masked.gather.v1i64.v1p0i64(<1 x i64*> %ptrs, i32 8, <1 x i1> %m, <1 x i64> %passthru) ret <1 x i64> %v @@ -921,6 +1007,7 @@ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i64: @@ -928,6 +1015,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> %ptrs, i32 8, <2 x i1> %m, <2 x i64> %passthru) ret <2 x i64> %v @@ -941,6 +1029,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4i64: @@ -948,6 +1037,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, i32 8, <4 x i1> %m, <4 x i64> %passthru) ret <4 x i64> %v @@ -959,12 +1049,14 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -976,11 +1068,13 @@ ; RV32-LABEL: mgather_falsemask_v4i64: ; RV32: # %bb.0: ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, i32 8, <4 x i1> zeroinitializer, <4 x i64> %passthru) ret <4 x i64> %v @@ -994,6 +1088,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8i64: @@ -1001,6 +1096,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) ret <8 x i64> %v @@ -1015,6 +1111,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: 
.cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8_v8i64: @@ -1024,6 +1121,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) @@ -1038,6 +1136,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8i64: @@ -1047,6 +1146,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1062,6 +1162,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8i64: @@ -1071,6 +1172,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1087,6 +1189,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i16_v8i64: @@ -1096,6 +1199,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) @@ -1110,6 +1214,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i16_v8i64: @@ -1119,6 +1224,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1134,6 +1240,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i16_v8i64: @@ -1143,6 +1250,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1158,6 +1266,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i32_v8i64: @@ -1167,6 +1276,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr 
inbounds i64, i64* %base, <8 x i32> %idxs %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) @@ -1181,6 +1291,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i32_v8i64: @@ -1190,6 +1301,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1205,6 +1317,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i32_v8i64: @@ -1214,6 +1327,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1228,6 +1342,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i64: @@ -1236,6 +1351,7 @@ ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) @@ -1250,6 +1366,7 @@ ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1f16: @@ -1257,6 +1374,7 @@ ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <1 x half> @llvm.masked.gather.v1f16.v1p0f16(<1 x half*> %ptrs, i32 2, <1 x i1> %m, <1 x half> %passthru) ret <1 x half> %v @@ -1270,6 +1388,7 @@ ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2f16: @@ -1277,6 +1396,7 @@ ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x half> @llvm.masked.gather.v2f16.v2p0f16(<2 x half*> %ptrs, i32 2, <2 x i1> %m, <2 x half> %passthru) ret <2 x half> %v @@ -1290,6 +1410,7 @@ ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4f16: @@ -1297,6 +1418,7 @@ ; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*> %ptrs, i32 2, <4 x i1> %m, <4 x half> %passthru) ret <4 x half> %v @@ -1308,6 +1430,7 @@ ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; 
RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_v4f16: @@ -1315,6 +1438,7 @@ ; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1326,11 +1450,13 @@ ; RV32-LABEL: mgather_falsemask_v4f16: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_v4f16: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*> %ptrs, i32 2, <4 x i1> zeroinitializer, <4 x half> %passthru) ret <4 x half> %v @@ -1344,6 +1470,7 @@ ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8f16: @@ -1351,6 +1478,7 @@ ; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru) ret <8 x half> %v @@ -1365,6 +1493,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8_v8f16: @@ -1375,6 +1504,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru) @@ -1390,6 +1520,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8f16: @@ -1400,6 +1531,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs @@ -1416,6 +1548,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8f16: @@ -1426,6 +1559,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs @@ -1442,6 +1576,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8f16: @@ -1452,6 +1587,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru) @@ -1466,6 +1602,7 @@ ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1f32: @@ -1473,6 +1610,7 @@ ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <1 x float> @llvm.masked.gather.v1f32.v1p0f32(<1 x float*> %ptrs, i32 4, <1 x i1> %m, <1 x float> %passthru) ret <1 x float> %v @@ -1486,6 +1624,7 @@ ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2f32: @@ -1493,6 +1632,7 @@ ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*> %ptrs, i32 4, <2 x i1> %m, <2 x float> %passthru) ret <2 x float> %v @@ -1506,6 +1646,7 @@ ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4f32: @@ -1513,6 +1654,7 @@ ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> %m, <4 x float> %passthru) ret <4 x float> %v @@ -1523,6 +1665,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_v4f32: @@ -1530,6 +1673,7 @@ ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1541,11 +1685,13 @@ ; RV32-LABEL: mgather_falsemask_v4f32: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_v4f32: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> zeroinitializer, <4 x float> %passthru) ret <4 x float> %v @@ -1559,6 +1705,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8f32: @@ -1566,6 +1713,7 @@ ; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) ret <8 x float> %v @@ -1579,6 +1727,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, 
(a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8_v8f32: @@ -1589,6 +1738,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) @@ -1603,6 +1753,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8f32: @@ -1613,6 +1764,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1628,6 +1780,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8f32: @@ -1638,6 +1791,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1653,6 +1807,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i16_v8f32: @@ -1663,6 +1818,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) @@ -1677,6 +1833,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i16_v8f32: @@ -1687,6 +1844,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1702,6 +1860,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i16_v8f32: @@ -1712,6 +1871,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1726,6 +1886,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8f32: @@ -1736,6 +1897,7 @@ ; RV64-NEXT: vsetvli 
zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) @@ -1750,6 +1912,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1f64: @@ -1757,6 +1920,7 @@ ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <1 x double> @llvm.masked.gather.v1f64.v1p0f64(<1 x double*> %ptrs, i32 8, <1 x i1> %m, <1 x double> %passthru) ret <1 x double> %v @@ -1770,6 +1934,7 @@ ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2f64: @@ -1777,6 +1942,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 8, <2 x i1> %m, <2 x double> %passthru) ret <2 x double> %v @@ -1790,6 +1956,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4f64: @@ -1797,6 +1964,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> %ptrs, i32 8, <4 x i1> %m, <4 x double> %passthru) ret <4 x double> %v @@ -1808,12 +1976,14 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_v4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1825,11 +1995,13 @@ ; RV32-LABEL: mgather_falsemask_v4f64: ; RV32: # %bb.0: ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_v4f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> %ptrs, i32 8, <4 x i1> zeroinitializer, <4 x double> %passthru) ret <4 x double> %v @@ -1843,6 +2015,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8f64: @@ -1850,6 +2023,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> 
%passthru) ret <8 x double> %v @@ -1864,6 +2038,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8_v8f64: @@ -1873,6 +2048,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i8> %idxs %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) @@ -1887,6 +2063,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8f64: @@ -1896,6 +2073,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1911,6 +2089,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8f64: @@ -1920,6 +2099,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1936,6 +2116,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i16_v8f64: @@ -1945,6 +2126,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i16> %idxs %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) @@ -1959,6 +2141,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i16_v8f64: @@ -1968,6 +2151,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1983,6 +2167,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i16_v8f64: @@ -1992,6 +2177,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -2007,6 +2193,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; 
RV64-LABEL: mgather_baseidx_v8i32_v8f64: @@ -2016,6 +2203,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) @@ -2030,6 +2218,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i32_v8f64: @@ -2039,6 +2228,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -2054,6 +2244,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i32_v8f64: @@ -2063,6 +2254,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -2077,6 +2269,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8f64: @@ -2085,6 +2278,7 @@ ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) @@ -2101,6 +2295,7 @@ ; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v12, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v16i8: @@ -2110,6 +2305,7 @@ ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v16, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <16 x i8> %idxs %v = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 2, <16 x i1> %m, <16 x i8> %passthru) @@ -2127,6 +2323,7 @@ ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v32i8: @@ -2153,6 +2350,7 @@ ; RV64-NEXT: vslideup.vi v8, v10, 0 ; RV64-NEXT: vsetvli zero, a0, e8, m2, tu, mu ; RV64-NEXT: vslideup.vi v8, v14, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs %v = call <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*> %ptrs, i32 2, <32 x i1> %m, <32 x i8> %passthru) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll @@ -11,12 +11,14 @@ ; 
RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v1i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v1i8.v1p0i8(<1 x i8> %val, <1 x i8*> %ptrs, i32 1, <1 x i1> %m) ret void @@ -29,12 +31,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v2i8.v2p0i8(<2 x i8> %val, <2 x i8*> %ptrs, i32 1, <2 x i1> %m) ret void @@ -46,6 +50,7 @@ ; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2i16_truncstore_v2i8: @@ -53,6 +58,7 @@ ; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i16> %val to <2 x i8> call void @llvm.masked.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, i32 1, <2 x i1> %m) @@ -67,6 +73,7 @@ ; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2i32_truncstore_v2i8: @@ -76,6 +83,7 @@ ; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i32> %val to <2 x i8> call void @llvm.masked.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, i32 1, <2 x i1> %m) @@ -92,6 +100,7 @@ ; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2i64_truncstore_v2i8: @@ -103,6 +112,7 @@ ; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i64> %val to <2 x i8> call void @llvm.masked.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, i32 1, <2 x i1> %m) @@ -116,12 +126,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, i32 1, <4 x i1> %m) ret void @@ -132,12 +144,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 
x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -148,10 +162,12 @@ define void @mscatter_falsemask_v4i8(<4 x i8> %val, <4 x i8*> %ptrs) { ; RV32-LABEL: mscatter_falsemask_v4i8: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_v4i8: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, i32 1, <4 x i1> zeroinitializer) ret void @@ -164,12 +180,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v8i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, i32 1, <8 x i1> %m) ret void @@ -182,6 +200,7 @@ ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8: @@ -190,6 +209,7 @@ ; RV64-NEXT: vsext.vf8 v12, v9 ; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %idxs call void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, i32 1, <8 x i1> %m) @@ -203,12 +223,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v1i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v1i16.v1p0i16(<1 x i16> %val, <1 x i16*> %ptrs, i32 2, <1 x i1> %m) ret void @@ -221,12 +243,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v2i16.v2p0i16(<2 x i16> %val, <2 x i16*> %ptrs, i32 2, <2 x i1> %m) ret void @@ -238,6 +262,7 @@ ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2i32_truncstore_v2i16: @@ -245,6 +270,7 @@ ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i32> %val to <2 x i16> call void @llvm.masked.scatter.v2i16.v2p0i16(<2 x i16> %tval, <2 x i16*> %ptrs, i32 2, <2 x i1> %m) @@ -259,6 +285,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2i64_truncstore_v2i16: @@ -268,6 +295,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, 
(zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i64> %val to <2 x i16> call void @llvm.masked.scatter.v2i16.v2p0i16(<2 x i16> %tval, <2 x i16*> %ptrs, i32 2, <2 x i1> %m) @@ -281,12 +309,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %val, <4 x i16*> %ptrs, i32 2, <4 x i1> %m) ret void @@ -297,12 +327,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -313,10 +345,12 @@ define void @mscatter_falsemask_v4i16(<4 x i16> %val, <4 x i16*> %ptrs) { ; RV32-LABEL: mscatter_falsemask_v4i16: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_v4i16: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %val, <4 x i16*> %ptrs, i32 2, <4 x i1> zeroinitializer) ret void @@ -329,12 +363,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v8i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, i32 2, <8 x i1> %m) ret void @@ -348,6 +384,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8i16: @@ -357,6 +394,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, i32 2, <8 x i1> %m) @@ -371,6 +409,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8i16: @@ -380,6 +419,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs @@ -395,6 +435,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: 
mscatter_baseidx_zext_v8i8_v8i16: @@ -404,6 +445,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs @@ -419,6 +461,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i16: @@ -428,6 +471,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs call void @llvm.masked.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, i32 2, <8 x i1> %m) @@ -441,12 +485,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v1i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v1i32.v1p0i32(<1 x i32> %val, <1 x i32*> %ptrs, i32 4, <1 x i1> %m) ret void @@ -459,12 +505,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v2i32.v2p0i32(<2 x i32> %val, <2 x i32*> %ptrs, i32 4, <2 x i1> %m) ret void @@ -476,6 +524,7 @@ ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2i64_truncstore_v2i32: @@ -483,6 +532,7 @@ ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i64> %val to <2 x i32> call void @llvm.masked.scatter.v2i32.v2p0i32(<2 x i32> %tval, <2 x i32*> %ptrs, i32 4, <2 x i1> %m) @@ -496,12 +546,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, i32 4, <4 x i1> %m) ret void @@ -512,12 +564,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -528,10 +582,12 @@ define void 
@mscatter_falsemask_v4i32(<4 x i32> %val, <4 x i32*> %ptrs) { ; RV32-LABEL: mscatter_falsemask_v4i32: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_v4i32: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, i32 4, <4 x i1> zeroinitializer) ret void @@ -544,12 +600,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v8i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, i32 4, <8 x i1> %m) ret void @@ -562,6 +620,7 @@ ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8i32: @@ -571,6 +630,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, i32 4, <8 x i1> %m) @@ -584,6 +644,7 @@ ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8i32: @@ -593,6 +654,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -607,6 +669,7 @@ ; RV32-NEXT: vzext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8i32: @@ -616,6 +679,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -630,6 +694,7 @@ ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i16_v8i32: @@ -639,6 +704,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, i32 4, <8 x i1> %m) @@ -652,6 +718,7 @@ ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8i32: @@ -661,6 +728,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: 
.cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -675,6 +743,7 @@ ; RV32-NEXT: vzext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8i32: @@ -684,6 +753,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -697,6 +767,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsll.vi v10, v10, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i32: @@ -706,6 +777,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, i32 4, <8 x i1> %m) @@ -719,12 +791,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v1i64.v1p0i64(<1 x i64> %val, <1 x i64*> %ptrs, i32 8, <1 x i1> %m) ret void @@ -737,12 +811,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v2i64.v2p0i64(<2 x i64> %val, <2 x i64*> %ptrs, i32 8, <2 x i1> %m) ret void @@ -755,12 +831,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4i64.v4p0i64(<4 x i64> %val, <4 x i64*> %ptrs, i32 8, <4 x i1> %m) ret void @@ -771,12 +849,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -787,10 +867,12 @@ define void @mscatter_falsemask_v4i64(<4 x i64> %val, <4 x i64*> %ptrs) { ; RV32-LABEL: mscatter_falsemask_v4i64: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_v4i64: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 
0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4i64.v4p0i64(<4 x i64> %val, <4 x i64*> %ptrs, i32 8, <4 x i1> zeroinitializer) ret void @@ -803,12 +885,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) ret void @@ -822,6 +906,7 @@ ; RV32-NEXT: vsll.vi v12, v14, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8i64: @@ -830,6 +915,7 @@ ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) @@ -843,6 +929,7 @@ ; RV32-NEXT: vsext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8i64: @@ -851,6 +938,7 @@ ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -865,6 +953,7 @@ ; RV32-NEXT: vzext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8i64: @@ -873,6 +962,7 @@ ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -888,6 +978,7 @@ ; RV32-NEXT: vsll.vi v12, v14, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i16_v8i64: @@ -896,6 +987,7 @@ ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) @@ -909,6 +1001,7 @@ ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8i64: @@ -917,6 +1010,7 @@ ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -931,6 +1025,7 @@ ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: 
ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8i64: @@ -939,6 +1034,7 @@ ; RV64-NEXT: vzext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -953,6 +1049,7 @@ ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i32_v8i64: @@ -961,6 +1058,7 @@ ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i32> %idxs call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) @@ -974,6 +1072,7 @@ ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i32_v8i64: @@ -982,6 +1081,7 @@ ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -996,6 +1096,7 @@ ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i32_v8i64: @@ -1004,6 +1105,7 @@ ; RV64-NEXT: vzext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1017,6 +1119,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i64: @@ -1024,6 +1127,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsll.vi v12, v12, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs call void @llvm.masked.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, i32 8, <8 x i1> %m) @@ -1037,12 +1141,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v1f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v1f16.v1p0f16(<1 x half> %val, <1 x half*> %ptrs, i32 2, <1 x i1> %m) ret void @@ -1055,12 +1161,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v2f16.v2p0f16(<2 x half> %val, <2 x half*> %ptrs, i32 2, <2 
x i1> %m) ret void @@ -1073,12 +1181,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half> %val, <4 x half*> %ptrs, i32 2, <4 x i1> %m) ret void @@ -1089,12 +1199,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1105,10 +1217,12 @@ define void @mscatter_falsemask_v4f16(<4 x half> %val, <4 x half*> %ptrs) { ; RV32-LABEL: mscatter_falsemask_v4f16: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_v4f16: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4f16.v4p0f16(<4 x half> %val, <4 x half*> %ptrs, i32 2, <4 x i1> zeroinitializer) ret void @@ -1121,12 +1235,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v8f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, i32 2, <8 x i1> %m) ret void @@ -1140,6 +1256,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8f16: @@ -1149,6 +1266,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, i32 2, <8 x i1> %m) @@ -1163,6 +1281,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8f16: @@ -1172,6 +1291,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs @@ -1187,6 +1307,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8f16: @@ -1196,6 +1317,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs @@ -1211,6 +1333,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8f16: @@ -1220,6 +1343,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs call void @llvm.masked.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, i32 2, <8 x i1> %m) @@ -1233,12 +1357,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v1f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v1f32.v1p0f32(<1 x float> %val, <1 x float*> %ptrs, i32 4, <1 x i1> %m) ret void @@ -1251,12 +1377,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v2f32.v2p0f32(<2 x float> %val, <2 x float*> %ptrs, i32 4, <2 x i1> %m) ret void @@ -1269,12 +1397,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %val, <4 x float*> %ptrs, i32 4, <4 x i1> %m) ret void @@ -1285,12 +1415,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1301,10 +1433,12 @@ define void @mscatter_falsemask_v4f32(<4 x float> %val, <4 x float*> %ptrs) { ; RV32-LABEL: mscatter_falsemask_v4f32: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_v4f32: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4f32.v4p0f32(<4 x float> %val, <4 x float*> %ptrs, i32 4, <4 x i1> zeroinitializer) ret void @@ -1317,12 +1451,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v8f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, i32 4, <8 x i1> %m) ret void @@ -1335,6 +1471,7 @@ ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8f32: @@ -1344,6 +1481,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs call void @llvm.masked.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, i32 4, <8 x i1> %m) @@ -1357,6 +1495,7 @@ ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8f32: @@ -1366,6 +1505,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1380,6 +1520,7 @@ ; RV32-NEXT: vzext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8f32: @@ -1389,6 +1530,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1403,6 +1545,7 @@ ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i16_v8f32: @@ -1412,6 +1555,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs call void @llvm.masked.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, i32 4, <8 x i1> %m) @@ -1425,6 +1569,7 @@ ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8f32: @@ -1434,6 +1579,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1448,6 +1594,7 @@ ; RV32-NEXT: vzext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8f32: @@ -1457,6 +1604,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ 
-1470,6 +1618,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsll.vi v10, v10, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8f32: @@ -1479,6 +1628,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs call void @llvm.masked.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, i32 4, <8 x i1> %m) @@ -1492,12 +1642,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v1f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v1f64.v1p0f64(<1 x double> %val, <1 x double*> %ptrs, i32 8, <1 x i1> %m) ret void @@ -1510,12 +1662,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, i32 8, <2 x i1> %m) ret void @@ -1528,12 +1682,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4f64.v4p0f64(<4 x double> %val, <4 x double*> %ptrs, i32 8, <4 x i1> %m) ret void @@ -1544,12 +1700,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1560,10 +1718,12 @@ define void @mscatter_falsemask_v4f64(<4 x double> %val, <4 x double*> %ptrs) { ; RV32-LABEL: mscatter_falsemask_v4f64: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_v4f64: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4f64.v4p0f64(<4 x double> %val, <4 x double*> %ptrs, i32 8, <4 x i1> zeroinitializer) ret void @@ -1576,12 +1736,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v8f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) ret void 
@@ -1595,6 +1757,7 @@ ; RV32-NEXT: vsll.vi v12, v14, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8f64: @@ -1603,6 +1766,7 @@ ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i8> %idxs call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) @@ -1616,6 +1780,7 @@ ; RV32-NEXT: vsext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8f64: @@ -1624,6 +1789,7 @@ ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1638,6 +1804,7 @@ ; RV32-NEXT: vzext.vf8 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8f64: @@ -1646,6 +1813,7 @@ ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1661,6 +1829,7 @@ ; RV32-NEXT: vsll.vi v12, v14, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i16_v8f64: @@ -1669,6 +1838,7 @@ ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i16> %idxs call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) @@ -1682,6 +1852,7 @@ ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8f64: @@ -1690,6 +1861,7 @@ ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1704,6 +1876,7 @@ ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8f64: @@ -1712,6 +1885,7 @@ ; RV64-NEXT: vzext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1726,6 +1900,7 @@ ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: 
mscatter_baseidx_v8i32_v8f64: @@ -1734,6 +1909,7 @@ ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) @@ -1747,6 +1923,7 @@ ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i32_v8f64: @@ -1755,6 +1932,7 @@ ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1769,6 +1947,7 @@ ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i32_v8f64: @@ -1777,6 +1956,7 @@ ; RV64-NEXT: vzext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1790,6 +1970,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8f64: @@ -1797,6 +1978,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsll.vi v12, v12, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs call void @llvm.masked.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, i32 8, <8 x i1> %m) @@ -1812,6 +1994,7 @@ ; RV32-NEXT: vsext.vf4 v12, v9 ; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v16i8: @@ -1820,6 +2003,7 @@ ; RV64-NEXT: vsext.vf8 v16, v9 ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <16 x i8> %idxs call void @llvm.masked.scatter.v16i8.v16p0i8(<16 x i8> %val, <16 x i8*> %ptrs, i32 1, <16 x i1> %m) @@ -1836,6 +2020,7 @@ ; RV32-NEXT: vsext.vf4 v16, v10 ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v32i8: @@ -1853,6 +2038,7 @@ ; RV64-NEXT: vslidedown.vi v0, v0, 2 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs call void @llvm.masked.scatter.v32i8.v32p0i8(<32 x i8> %val, <32 x i8*> %ptrs, i32 1, <32 x i1> %m) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll @@ -14,6 
+14,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc half @llvm.vp.reduce.fadd.v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 %evl) ret half %r @@ -27,6 +28,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call half @llvm.vp.reduce.fadd.v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 %evl) ret half %r @@ -42,6 +44,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc half @llvm.vp.reduce.fadd.v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 %evl) ret half %r @@ -55,6 +58,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call half @llvm.vp.reduce.fadd.v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 %evl) ret half %r @@ -70,6 +74,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc float @llvm.vp.reduce.fadd.v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 %evl) ret float %r @@ -83,6 +88,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call float @llvm.vp.reduce.fadd.v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 %evl) ret float %r @@ -98,6 +104,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc float @llvm.vp.reduce.fadd.v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 %evl) ret float %r @@ -111,6 +118,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call float @llvm.vp.reduce.fadd.v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 %evl) ret float %r @@ -126,6 +134,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc double @llvm.vp.reduce.fadd.v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 %evl) ret double %r @@ -139,6 +148,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call double @llvm.vp.reduce.fadd.v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 %evl) ret double %r @@ -154,6 +164,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc double @llvm.vp.reduce.fadd.v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 %evl) ret double %r @@ -167,6 +178,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %r = call double @llvm.vp.reduce.fadd.v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 %evl) ret double %r diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.h fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x half>, <1 x half>* %x %red = call reassoc half @llvm.vector.reduce.fadd.v1f16(half %s, <1 x half> %v) @@ -27,6 +28,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x half>, <1 x half>* %x %red = call half @llvm.vector.reduce.fadd.v1f16(half %s, <1 x half> %v) @@ -48,6 +50,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.h fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x half>, <2 x half>* %x %red = call reassoc half @llvm.vector.reduce.fadd.v2f16(half %s, <2 x half> %v) @@ -64,6 +67,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x half>, <2 x half>* %x %red = call half @llvm.vector.reduce.fadd.v2f16(half %s, <2 x half> %v) @@ -85,6 +89,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.h fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x half>, <4 x half>* %x %red = call reassoc half @llvm.vector.reduce.fadd.v4f16(half %s, <4 x half> %v) @@ -101,6 +106,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x half>, <4 x half>* %x %red = call half @llvm.vector.reduce.fadd.v4f16(half %s, <4 x half> %v) @@ -122,6 +128,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.h fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x half>, <8 x half>* %x %red = call reassoc half @llvm.vector.reduce.fadd.v8f16(half %s, <8 x half> %v) @@ -138,6 +145,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x half>, <8 x half>* %x %red = call half @llvm.vector.reduce.fadd.v8f16(half %s, <8 x half> %v) @@ -159,6 +167,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.h fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x half>, <16 x half>* %x %red = call reassoc half @llvm.vector.reduce.fadd.v16f16(half %s, <16 x half> %v) @@ -175,6 +184,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x half>, <16 x half>* %x %red = call half @llvm.vector.reduce.fadd.v16f16(half %s, <16 x half> %v) @@ -197,6 +207,7 @@ ; RV32-NEXT: vfredusum.vs v8, v8, v12 ; RV32-NEXT: vfmv.f.s ft0, v8 ; RV32-NEXT: fadd.h fa0, fa0, ft0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; 
RV64-LABEL: vreduce_fadd_v32f16: @@ -212,6 +223,7 @@ ; RV64-NEXT: vfredusum.vs v8, v8, v12 ; RV64-NEXT: vfmv.f.s ft0, v8 ; RV64-NEXT: fadd.h fa0, fa0, ft0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x half>, <32 x half>* %x %red = call reassoc half @llvm.vector.reduce.fadd.v32f16(half %s, <32 x half> %v) @@ -229,6 +241,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x half>, <32 x half>* %x %red = call half @llvm.vector.reduce.fadd.v32f16(half %s, <32 x half> %v) @@ -251,6 +264,7 @@ ; RV32-NEXT: vfredusum.vs v8, v8, v16 ; RV32-NEXT: vfmv.f.s ft0, v8 ; RV32-NEXT: fadd.h fa0, fa0, ft0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_fadd_v64f16: @@ -266,6 +280,7 @@ ; RV64-NEXT: vfredusum.vs v8, v8, v16 ; RV64-NEXT: vfmv.f.s ft0, v8 ; RV64-NEXT: fadd.h fa0, fa0, ft0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <64 x half>, <64 x half>* %x %red = call reassoc half @llvm.vector.reduce.fadd.v64f16(half %s, <64 x half> %v) @@ -283,6 +298,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x half>, <64 x half>* %x %red = call half @llvm.vector.reduce.fadd.v64f16(half %s, <64 x half> %v) @@ -308,6 +324,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.h fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x half>, <128 x half>* %x %red = call reassoc half @llvm.vector.reduce.fadd.v128f16(half %s, <128 x half> %v) @@ -332,6 +349,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x half>, <128 x half>* %x %red = call half @llvm.vector.reduce.fadd.v128f16(half %s, <128 x half> %v) @@ -347,6 +365,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x float>, <1 x float>* %x %red = call reassoc float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %v) @@ -363,6 +382,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x float>, <1 x float>* %x %red = call float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %v) @@ -384,6 +404,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x float>, <2 x float>* %x %red = call reassoc float @llvm.vector.reduce.fadd.v2f32(float %s, <2 x float> %v) @@ -400,6 +421,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x float>, <2 x float>* %x %red = call float @llvm.vector.reduce.fadd.v2f32(float %s, <2 x float> %v) @@ -421,6 +443,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x float>, <4 x float>* %x %red = call reassoc float @llvm.vector.reduce.fadd.v4f32(float 
%s, <4 x float> %v) @@ -437,6 +460,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x float>, <4 x float>* %x %red = call float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %v) @@ -458,6 +482,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x float>, <8 x float>* %x %red = call reassoc float @llvm.vector.reduce.fadd.v8f32(float %s, <8 x float> %v) @@ -474,6 +499,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x float>, <8 x float>* %x %red = call float @llvm.vector.reduce.fadd.v8f32(float %s, <8 x float> %v) @@ -495,6 +521,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x float>, <16 x float>* %x %red = call reassoc float @llvm.vector.reduce.fadd.v16f32(float %s, <16 x float> %v) @@ -511,6 +538,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x float>, <16 x float>* %x %red = call float @llvm.vector.reduce.fadd.v16f32(float %s, <16 x float> %v) @@ -533,6 +561,7 @@ ; RV32-NEXT: vfredusum.vs v8, v8, v16 ; RV32-NEXT: vfmv.f.s ft0, v8 ; RV32-NEXT: fadd.s fa0, fa0, ft0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_fadd_v32f32: @@ -548,6 +577,7 @@ ; RV64-NEXT: vfredusum.vs v8, v8, v16 ; RV64-NEXT: vfmv.f.s ft0, v8 ; RV64-NEXT: fadd.s fa0, fa0, ft0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x float>, <32 x float>* %x %red = call reassoc float @llvm.vector.reduce.fadd.v32f32(float %s, <32 x float> %v) @@ -565,6 +595,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x float>, <32 x float>* %x %red = call float @llvm.vector.reduce.fadd.v32f32(float %s, <32 x float> %v) @@ -590,6 +621,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x float>, <64 x float>* %x %red = call reassoc float @llvm.vector.reduce.fadd.v64f32(float %s, <64 x float> %v) @@ -614,6 +646,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x float>, <64 x float>* %x %red = call float @llvm.vector.reduce.fadd.v64f32(float %s, <64 x float> %v) @@ -629,6 +662,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.d fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x double>, <1 x double>* %x %red = call reassoc double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %v) @@ -645,6 +679,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x double>, <1 x double>* %x %red = call double 
@llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %v) @@ -666,6 +701,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.d fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x double>, <2 x double>* %x %red = call reassoc double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %v) @@ -682,6 +718,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x double>, <2 x double>* %x %red = call double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %v) @@ -703,6 +740,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.d fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x double>, <4 x double>* %x %red = call reassoc double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %v) @@ -719,6 +757,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x double>, <4 x double>* %x %red = call double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %v) @@ -740,6 +779,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.d fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x double>, <8 x double>* %x %red = call reassoc double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %v) @@ -756,6 +796,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x double>, <8 x double>* %x %red = call double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %v) @@ -777,6 +818,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.d fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x double>, <16 x double>* %x %red = call reassoc double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %v) @@ -793,6 +835,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x double>, <16 x double>* %x %red = call double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %v) @@ -817,6 +860,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.d fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x double>, <32 x double>* %x %red = call reassoc double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %v) @@ -840,6 +884,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x double>, <32 x double>* %x %red = call double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %v) @@ -860,6 +905,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x half>, <2 x half>* %x %red = call half @llvm.vector.reduce.fmin.v2f16(<2 x half> %v) @@ -880,6 +926,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: 
vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x half>, <4 x half>* %x %red = call half @llvm.vector.reduce.fmin.v4f16(<4 x half> %v) @@ -898,6 +945,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x half>, <4 x half>* %x %red = call nnan half @llvm.vector.reduce.fmin.v4f16(<4 x half> %v) @@ -916,6 +964,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x half>, <4 x half>* %x %red = call nnan ninf half @llvm.vector.reduce.fmin.v4f16(<4 x half> %v) @@ -940,6 +989,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x half>, <128 x half>* %x %red = call half @llvm.vector.reduce.fmin.v128f16(<128 x half> %v) @@ -960,6 +1010,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x float>, <2 x float>* %x %red = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> %v) @@ -980,6 +1031,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x float>, <4 x float>* %x %red = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v) @@ -998,6 +1050,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x float>, <4 x float>* %x %red = call nnan float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v) @@ -1016,6 +1069,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x float>, <4 x float>* %x %red = call nnan ninf float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v) @@ -1046,6 +1100,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x float>, <128 x float>* %x %red = call float @llvm.vector.reduce.fmin.v128f32(<128 x float> %v) @@ -1066,6 +1121,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x double>, <2 x double>* %x %red = call double @llvm.vector.reduce.fmin.v2f64(<2 x double> %v) @@ -1086,6 +1142,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x double>, <4 x double>* %x %red = call double @llvm.vector.reduce.fmin.v4f64(<4 x double> %v) @@ -1104,6 +1161,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x double>, <4 x double>* %x %red = call nnan double @llvm.vector.reduce.fmin.v4f64(<4 x double> %v) @@ -1122,6 +1180,7 @@ ; 
CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x double>, <4 x double>* %x %red = call nnan ninf double @llvm.vector.reduce.fmin.v4f64(<4 x double> %v) @@ -1145,6 +1204,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x double>, <32 x double>* %x %red = call double @llvm.vector.reduce.fmin.v32f64(<32 x double> %v) @@ -1165,6 +1225,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x half>, <2 x half>* %x %red = call half @llvm.vector.reduce.fmax.v2f16(<2 x half> %v) @@ -1185,6 +1246,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x half>, <4 x half>* %x %red = call half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v) @@ -1203,6 +1265,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x half>, <4 x half>* %x %red = call nnan half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v) @@ -1221,6 +1284,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x half>, <4 x half>* %x %red = call nnan ninf half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v) @@ -1245,6 +1309,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x half>, <128 x half>* %x %red = call half @llvm.vector.reduce.fmax.v128f16(<128 x half> %v) @@ -1265,6 +1330,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x float>, <2 x float>* %x %red = call float @llvm.vector.reduce.fmax.v2f32(<2 x float> %v) @@ -1285,6 +1351,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x float>, <4 x float>* %x %red = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v) @@ -1303,6 +1370,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x float>, <4 x float>* %x %red = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v) @@ -1321,6 +1389,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x float>, <4 x float>* %x %red = call nnan ninf float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v) @@ -1351,6 +1420,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x float>, <128 x float>* %x %red = call float 
@llvm.vector.reduce.fmax.v128f32(<128 x float> %v) @@ -1371,6 +1441,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x double>, <2 x double>* %x %red = call double @llvm.vector.reduce.fmax.v2f64(<2 x double> %v) @@ -1391,6 +1462,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x double>, <4 x double>* %x %red = call double @llvm.vector.reduce.fmax.v4f64(<4 x double> %v) @@ -1409,6 +1481,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x double>, <4 x double>* %x %red = call nnan double @llvm.vector.reduce.fmax.v4f64(<4 x double> %v) @@ -1427,6 +1500,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x double>, <4 x double>* %x %red = call nnan ninf double @llvm.vector.reduce.fmax.v4f64(<4 x double> %v) @@ -1450,6 +1524,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x double>, <32 x double>* %x %red = call double @llvm.vector.reduce.fmax.v32f64(<32 x double> %v) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.add.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl) ret i8 %r @@ -30,6 +31,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.umax.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl) ret i8 %r @@ -45,6 +47,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.smax.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl) ret i8 %r @@ -61,6 +64,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.umin.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl) ret i8 %r @@ -76,6 +80,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.smin.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl) ret i8 %r @@ -91,6 +96,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.and.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl) ret 
i8 %r @@ -106,6 +112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.or.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl) ret i8 %r @@ -121,6 +128,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.xor.v2i8(i8 %s, <2 x i8> %v, <2 x i1> %m, i32 %evl) ret i8 %r @@ -136,6 +144,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.add.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl) ret i8 %r @@ -152,6 +161,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.umax.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl) ret i8 %r @@ -167,6 +177,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.smax.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl) ret i8 %r @@ -183,6 +194,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.umin.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl) ret i8 %r @@ -198,6 +210,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.smin.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl) ret i8 %r @@ -213,6 +226,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.and.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl) ret i8 %r @@ -228,6 +242,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.or.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl) ret i8 %r @@ -243,6 +258,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.xor.v4i8(i8 %s, <4 x i8> %v, <4 x i1> %m, i32 %evl) ret i8 %r @@ -258,6 +274,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.add.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl) ret i16 %r @@ -276,6 +293,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_v2i16: @@ -288,6 +306,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; 
RV64-NEXT: ret %r = call i16 @llvm.vp.reduce.umax.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl) ret i16 %r @@ -303,6 +322,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.smax.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl) ret i16 %r @@ -321,6 +341,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_v2i16: @@ -333,6 +354,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i16 @llvm.vp.reduce.umin.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl) ret i16 %r @@ -348,6 +370,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.smin.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl) ret i16 %r @@ -363,6 +386,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.and.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl) ret i16 %r @@ -378,6 +402,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.or.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl) ret i16 %r @@ -393,6 +418,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.xor.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl) ret i16 %r @@ -408,6 +434,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.add.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl) ret i16 %r @@ -426,6 +453,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_v4i16: @@ -438,6 +466,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i16 @llvm.vp.reduce.umax.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl) ret i16 %r @@ -453,6 +482,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.smax.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl) ret i16 %r @@ -471,6 +501,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_v4i16: @@ -483,6 +514,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i16 @llvm.vp.reduce.umin.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl) ret i16 %r @@ -498,6 +530,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.smin.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl) ret i16 %r @@ -513,6 +546,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.and.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl) ret i16 %r @@ -528,6 +562,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.or.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl) ret i16 %r @@ -543,6 +578,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.xor.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl) ret i16 %r @@ -558,6 +594,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.add.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl) ret i32 %r @@ -573,6 +610,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_v2i32: @@ -584,6 +622,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i32 @llvm.vp.reduce.umax.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl) ret i32 %r @@ -599,6 +638,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.smax.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl) ret i32 %r @@ -614,6 +654,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_v2i32: @@ -625,6 +666,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i32 @llvm.vp.reduce.umin.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl) ret i32 %r @@ -640,6 +682,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.smin.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl) ret i32 %r @@ -655,6 +698,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.and.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl) ret i32 %r @@ -670,6 +714,7 @@ ; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.or.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl) ret i32 %r @@ -685,6 +730,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.xor.v2i32(i32 %s, <2 x i32> %v, <2 x i1> %m, i32 %evl) ret i32 %r @@ -700,6 +746,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.add.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl) ret i32 %r @@ -715,6 +762,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_v4i32: @@ -726,6 +774,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i32 @llvm.vp.reduce.umax.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl) ret i32 %r @@ -741,6 +790,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.smax.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl) ret i32 %r @@ -756,6 +806,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_v4i32: @@ -767,6 +818,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i32 @llvm.vp.reduce.umin.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl) ret i32 %r @@ -782,6 +834,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.smin.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl) ret i32 %r @@ -797,6 +850,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.and.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl) ret i32 %r @@ -812,6 +866,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.or.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl) ret i32 %r @@ -827,6 +882,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.xor.v4i32(i32 %s, <4 x i32> %v, <4 x i1> %m, i32 %evl) ret i32 %r @@ -852,6 +908,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_add_v2i64: @@ -861,6 +918,7 @@ ; RV64-NEXT: vsetvli zero, a1, 
e64, m1, tu, mu ; RV64-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.add.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl) ret i64 %r @@ -886,6 +944,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_v2i64: @@ -895,6 +954,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.umax.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl) ret i64 %r @@ -920,6 +980,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_smax_v2i64: @@ -929,6 +990,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.smax.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl) ret i64 %r @@ -954,6 +1016,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_v2i64: @@ -963,6 +1026,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.umin.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl) ret i64 %r @@ -988,6 +1052,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_smin_v2i64: @@ -997,6 +1062,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.smin.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl) ret i64 %r @@ -1022,6 +1088,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_and_v2i64: @@ -1031,6 +1098,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredand.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.and.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl) ret i64 %r @@ -1056,6 +1124,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_or_v2i64: @@ -1065,6 +1134,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.or.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl) ret i64 %r @@ -1090,6 +1160,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_xor_v2i64: @@ -1099,6 +1170,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call 
i64 @llvm.vp.reduce.xor.v2i64(i64 %s, <2 x i64> %v, <2 x i1> %m, i32 %evl) ret i64 %r @@ -1124,6 +1196,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_add_v4i64: @@ -1133,6 +1206,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredsum.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.add.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl) ret i64 %r @@ -1158,6 +1232,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_v4i64: @@ -1167,6 +1242,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.umax.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl) ret i64 %r @@ -1192,6 +1268,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_smax_v4i64: @@ -1201,6 +1278,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.smax.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl) ret i64 %r @@ -1226,6 +1304,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_v4i64: @@ -1235,6 +1314,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.umin.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl) ret i64 %r @@ -1260,6 +1340,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_smin_v4i64: @@ -1269,6 +1350,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.smin.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl) ret i64 %r @@ -1294,6 +1376,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_and_v4i64: @@ -1303,6 +1386,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredand.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.and.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl) ret i64 %r @@ -1328,6 +1412,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_or_v4i64: @@ -1337,6 +1422,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.or.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl) ret i64 %r @@ -1362,6 +1448,7 @@ ; 
RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_xor_v4i64: @@ -1371,6 +1458,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.xor.v4i64(i64 %s, <4 x i64> %v, <4 x i1> %m, i32 %evl) ret i64 %r diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll @@ -10,6 +10,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i8>, <1 x i8>* %x %red = call i8 @llvm.vector.reduce.add.v1i8(<1 x i8> %v) @@ -28,6 +29,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x %red = call i8 @llvm.vector.reduce.add.v2i8(<2 x i8> %v) @@ -46,6 +48,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x %red = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> %v) @@ -64,6 +67,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x %red = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %v) @@ -82,6 +86,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x %red = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %v) @@ -101,6 +106,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x %red = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %v) @@ -120,6 +126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x %red = call i8 @llvm.vector.reduce.add.v64i8(<64 x i8> %v) @@ -139,6 +146,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i8>, <128 x i8>* %x %red = call i8 @llvm.vector.reduce.add.v128i8(<128 x i8> %v) @@ -161,6 +169,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <256 x i8>, <256 x i8>* %x %red = call i8 @llvm.vector.reduce.add.v256i8(<256 x i8> %v) @@ -175,6 +184,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i16>, <1 x i16>* %x %red = call i16 @llvm.vector.reduce.add.v1i16(<1 x i16> %v) @@ -193,6 +203,7 @@ ; 
CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x %red = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> %v) @@ -211,6 +222,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x %red = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %v) @@ -229,6 +241,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i16>, <8 x i16>* %x %red = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %v) @@ -247,6 +260,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x %red = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %v) @@ -266,6 +280,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x %red = call i16 @llvm.vector.reduce.add.v32i16(<32 x i16> %v) @@ -285,6 +300,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i16>, <64 x i16>* %x %red = call i16 @llvm.vector.reduce.add.v64i16(<64 x i16> %v) @@ -307,6 +323,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i16>, <128 x i16>* %x %red = call i16 @llvm.vector.reduce.add.v128i16(<128 x i16> %v) @@ -321,6 +338,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i32>, <1 x i32>* %x %red = call i32 @llvm.vector.reduce.add.v1i32(<1 x i32> %v) @@ -339,6 +357,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x %red = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %v) @@ -357,6 +376,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v) @@ -375,6 +395,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x %red = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %v) @@ -393,6 +414,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v) @@ -412,6 +434,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.add.v32i32(<32 x i32> %v) @@ -434,6 +457,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.add.v64i32(<64 x i32> %v) @@ -451,6 +475,7 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v1i64: @@ -458,6 +483,7 @@ ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x %red = call i64 @llvm.vector.reduce.add.v1i64(<1 x i64> %v) @@ -480,6 +506,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v2i64: @@ -491,6 +518,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredsum.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <2 x i64>, <2 x i64>* %x %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v) @@ -513,6 +541,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v4i64: @@ -524,6 +553,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredsum.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <4 x i64>, <4 x i64>* %x %red = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %v) @@ -546,6 +576,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v8i64: @@ -557,6 +588,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredsum.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <8 x i64>, <8 x i64>* %x %red = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %v) @@ -579,6 +611,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v16i64: @@ -590,6 +623,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredsum.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <16 x i64>, <16 x i64>* %x %red = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %v) @@ -615,6 +649,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v32i64: @@ -629,6 +664,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredsum.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x %red = call i64 @llvm.vector.reduce.add.v32i64(<32 x i64> %v) @@ -694,6 +730,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %v = load <1 x i8>, <1 x i8>* %x %red = call i8 @llvm.vector.reduce.and.v1i8(<1 x i8> %v) @@ -712,6 +749,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x %red = call i8 @llvm.vector.reduce.and.v2i8(<2 x i8> %v) @@ -730,6 +768,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x %red = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %v) @@ -748,6 +787,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x %red = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %v) @@ -766,6 +806,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x %red = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %v) @@ -785,6 +826,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x %red = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %v) @@ -804,6 +846,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x %red = call i8 @llvm.vector.reduce.and.v64i8(<64 x i8> %v) @@ -823,6 +866,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i8>, <128 x i8>* %x %red = call i8 @llvm.vector.reduce.and.v128i8(<128 x i8> %v) @@ -845,6 +889,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <256 x i8>, <256 x i8>* %x %red = call i8 @llvm.vector.reduce.and.v256i8(<256 x i8> %v) @@ -859,6 +904,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i16>, <1 x i16>* %x %red = call i16 @llvm.vector.reduce.and.v1i16(<1 x i16> %v) @@ -877,6 +923,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x %red = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %v) @@ -895,6 +942,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x %red = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %v) @@ -913,6 +961,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i16>, <8 x i16>* %x %red = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %v) @@ -931,6 +980,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; 
CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x %red = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %v) @@ -950,6 +1000,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x %red = call i16 @llvm.vector.reduce.and.v32i16(<32 x i16> %v) @@ -969,6 +1020,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i16>, <64 x i16>* %x %red = call i16 @llvm.vector.reduce.and.v64i16(<64 x i16> %v) @@ -991,6 +1043,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i16>, <128 x i16>* %x %red = call i16 @llvm.vector.reduce.and.v128i16(<128 x i16> %v) @@ -1005,6 +1058,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i32>, <1 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v1i32(<1 x i32> %v) @@ -1023,6 +1077,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %v) @@ -1041,6 +1096,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %v) @@ -1059,6 +1115,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %v) @@ -1077,6 +1134,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %v) @@ -1096,6 +1154,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v32i32(<32 x i32> %v) @@ -1118,6 +1177,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.and.v64i32(<64 x i32> %v) @@ -1135,6 +1195,7 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v1i64: @@ -1142,6 +1203,7 @@ ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x %red = call i64 
@llvm.vector.reduce.and.v1i64(<1 x i64> %v) @@ -1164,6 +1226,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v2i64: @@ -1175,6 +1238,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredand.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <2 x i64>, <2 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %v) @@ -1197,6 +1261,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v4i64: @@ -1208,6 +1273,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredand.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <4 x i64>, <4 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %v) @@ -1230,6 +1296,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v8i64: @@ -1241,6 +1308,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredand.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <8 x i64>, <8 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %v) @@ -1263,6 +1331,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v16i64: @@ -1274,6 +1343,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredand.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <16 x i64>, <16 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> %v) @@ -1299,6 +1369,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v32i64: @@ -1313,6 +1384,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredand.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x %red = call i64 @llvm.vector.reduce.and.v32i64(<32 x i64> %v) @@ -1378,6 +1450,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i8>, <1 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v1i8(<1 x i8> %v) @@ -1396,6 +1469,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v2i8(<2 x i8> %v) @@ -1414,6 +1488,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> %v) @@ -1432,6 +1507,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %v) @@ -1450,6 +1526,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %v) @@ -1469,6 +1546,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %v) @@ -1488,6 +1566,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v64i8(<64 x i8> %v) @@ -1507,6 +1586,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i8>, <128 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v128i8(<128 x i8> %v) @@ -1529,6 +1609,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <256 x i8>, <256 x i8>* %x %red = call i8 @llvm.vector.reduce.or.v256i8(<256 x i8> %v) @@ -1543,6 +1624,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i16>, <1 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v1i16(<1 x i16> %v) @@ -1561,6 +1643,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v2i16(<2 x i16> %v) @@ -1579,6 +1662,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %v) @@ -1597,6 +1681,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i16>, <8 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %v) @@ -1615,6 +1700,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %v) @@ -1634,6 +1720,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v32i16(<32 x i16> %v) @@ -1653,6 +1740,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i16>, <64 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v64i16(<64 x i16> %v) @@ -1675,6 +1763,7 @@ ; CHECK-NEXT: 
vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i16>, <128 x i16>* %x %red = call i16 @llvm.vector.reduce.or.v128i16(<128 x i16> %v) @@ -1689,6 +1778,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i32>, <1 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v1i32(<1 x i32> %v) @@ -1707,6 +1797,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %v) @@ -1725,6 +1816,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %v) @@ -1743,6 +1835,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %v) @@ -1761,6 +1854,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %v) @@ -1780,6 +1874,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v32i32(<32 x i32> %v) @@ -1802,6 +1897,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.or.v64i32(<64 x i32> %v) @@ -1819,6 +1915,7 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v1i64: @@ -1826,6 +1923,7 @@ ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v1i64(<1 x i64> %v) @@ -1848,6 +1946,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v2i64: @@ -1859,6 +1958,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredor.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <2 x i64>, <2 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %v) @@ -1881,6 +1981,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v4i64: @@ -1892,6 +1993,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredor.vs v8, v8, v10 ; RV64-NEXT: 
vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <4 x i64>, <4 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %v) @@ -1914,6 +2016,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v8i64: @@ -1925,6 +2028,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredor.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <8 x i64>, <8 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %v) @@ -1947,6 +2051,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v16i64: @@ -1958,6 +2063,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredor.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <16 x i64>, <16 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> %v) @@ -1983,6 +2089,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v32i64: @@ -1997,6 +2104,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredor.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x %red = call i64 @llvm.vector.reduce.or.v32i64(<32 x i64> %v) @@ -2062,6 +2170,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i8>, <1 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v1i8(<1 x i8> %v) @@ -2080,6 +2189,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v2i8(<2 x i8> %v) @@ -2098,6 +2208,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> %v) @@ -2116,6 +2227,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %v) @@ -2134,6 +2246,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %v) @@ -2153,6 +2266,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %v) @@ -2172,6 +2286,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i8>, 
<64 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v64i8(<64 x i8> %v) @@ -2191,6 +2306,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i8>, <128 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v128i8(<128 x i8> %v) @@ -2213,6 +2329,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <256 x i8>, <256 x i8>* %x %red = call i8 @llvm.vector.reduce.xor.v256i8(<256 x i8> %v) @@ -2227,6 +2344,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i16>, <1 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v1i16(<1 x i16> %v) @@ -2245,6 +2363,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v2i16(<2 x i16> %v) @@ -2263,6 +2382,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %v) @@ -2281,6 +2401,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i16>, <8 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %v) @@ -2299,6 +2420,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %v) @@ -2318,6 +2440,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v32i16(<32 x i16> %v) @@ -2337,6 +2460,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i16>, <64 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v64i16(<64 x i16> %v) @@ -2359,6 +2483,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i16>, <128 x i16>* %x %red = call i16 @llvm.vector.reduce.xor.v128i16(<128 x i16> %v) @@ -2373,6 +2498,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i32>, <1 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v1i32(<1 x i32> %v) @@ -2391,6 +2517,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %v) @@ -2409,6 +2536,7 @@ ; CHECK-NEXT: 
vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %v) @@ -2427,6 +2555,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %v) @@ -2445,6 +2574,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v16i32(<16 x i32> %v) @@ -2464,6 +2594,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v32i32(<32 x i32> %v) @@ -2486,6 +2617,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.xor.v64i32(<64 x i32> %v) @@ -2503,6 +2635,7 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v1i64: @@ -2510,6 +2643,7 @@ ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v1i64(<1 x i64> %v) @@ -2532,6 +2666,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v2i64: @@ -2543,6 +2678,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredxor.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <2 x i64>, <2 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %v) @@ -2565,6 +2701,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v4i64: @@ -2576,6 +2713,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredxor.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <4 x i64>, <4 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %v) @@ -2598,6 +2736,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v8i64: @@ -2609,6 +2748,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredxor.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <8 x i64>, <8 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v8i64(<8 x i64> %v) @@ -2631,6 +2771,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret 
; ; RV64-LABEL: vreduce_xor_v16i64: @@ -2642,6 +2783,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredxor.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <16 x i64>, <16 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v16i64(<16 x i64> %v) @@ -2667,6 +2809,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v32i64: @@ -2681,6 +2824,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredxor.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x %red = call i64 @llvm.vector.reduce.xor.v32i64(<32 x i64> %v) @@ -2746,6 +2890,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i8>, <1 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v1i8(<1 x i8> %v) @@ -2765,6 +2910,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v2i8(<2 x i8> %v) @@ -2784,6 +2930,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v4i8(<4 x i8> %v) @@ -2803,6 +2950,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %v) @@ -2822,6 +2970,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %v) @@ -2842,6 +2991,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %v) @@ -2862,6 +3012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v64i8(<64 x i8> %v) @@ -2882,6 +3033,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i8>, <128 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v128i8(<128 x i8> %v) @@ -2905,6 +3057,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <256 x i8>, <256 x i8>* %x %red = call i8 @llvm.vector.reduce.smin.v256i8(<256 x i8> %v) @@ -2919,6 +3072,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %v = load <1 x i16>, <1 x i16>* %x %red = call i16 @llvm.vector.reduce.smin.v1i16(<1 x i16> %v) @@ -2939,6 +3093,7 @@ ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v2i16: @@ -2952,6 +3107,7 @@ ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x %red = call i16 @llvm.vector.reduce.smin.v2i16(<2 x i16> %v) @@ -2972,6 +3128,7 @@ ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v4i16: @@ -2985,6 +3142,7 @@ ; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x %red = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %v) @@ -3005,6 +3163,7 @@ ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v8i16: @@ -3018,6 +3177,7 @@ ; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <8 x i16>, <8 x i16>* %x %red = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %v) @@ -3038,6 +3198,7 @@ ; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v16i16: @@ -3051,6 +3212,7 @@ ; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x %red = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %v) @@ -3072,6 +3234,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v32i16: @@ -3086,6 +3249,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x %red = call i16 @llvm.vector.reduce.smin.v32i16(<32 x i16> %v) @@ -3107,6 +3271,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v64i16: @@ -3121,6 +3286,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <64 x i16>, <64 x i16>* %x %red = call i16 @llvm.vector.reduce.smin.v64i16(<64 x i16> %v) @@ -3145,6 +3311,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v128i16: @@ -3162,6 +3329,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; 
RV64-NEXT: ret %v = load <128 x i16>, <128 x i16>* %x %red = call i16 @llvm.vector.reduce.smin.v128i16(<128 x i16> %v) @@ -3176,6 +3344,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i32>, <1 x i32>* %x %red = call i32 @llvm.vector.reduce.smin.v1i32(<1 x i32> %v) @@ -3196,6 +3365,7 @@ ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v2i32: @@ -3209,6 +3379,7 @@ ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x %red = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %v) @@ -3229,6 +3400,7 @@ ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v4i32: @@ -3242,6 +3414,7 @@ ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %v) @@ -3262,6 +3435,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v8i32: @@ -3275,6 +3449,7 @@ ; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x %red = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %v) @@ -3295,6 +3470,7 @@ ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v16i32: @@ -3308,6 +3484,7 @@ ; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %v) @@ -3329,6 +3506,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v32i32: @@ -3343,6 +3521,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.smin.v32i32(<32 x i32> %v) @@ -3367,6 +3546,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v64i32: @@ -3384,6 +3564,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.smin.v64i32(<64 x i32> %v) @@ -3401,6 +3582,7 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; 
RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v1i64: @@ -3408,6 +3590,7 @@ ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x %red = call i64 @llvm.vector.reduce.smin.v1i64(<1 x i64> %v) @@ -3439,6 +3622,7 @@ ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v2i64: @@ -3452,6 +3636,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <2 x i64>, <2 x i64>* %x %red = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %v) @@ -3483,6 +3668,7 @@ ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v4i64: @@ -3496,6 +3682,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <4 x i64>, <4 x i64>* %x %red = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %v) @@ -3527,6 +3714,7 @@ ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v8i64: @@ -3540,6 +3728,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <8 x i64>, <8 x i64>* %x %red = call i64 @llvm.vector.reduce.smin.v8i64(<8 x i64> %v) @@ -3571,6 +3760,7 @@ ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v16i64: @@ -3584,6 +3774,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <16 x i64>, <16 x i64>* %x %red = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> %v) @@ -3618,6 +3809,7 @@ ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smin_v32i64: @@ -3634,6 +3826,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x %red = call i64 @llvm.vector.reduce.smin.v32i64(<32 x i64> %v) @@ -3709,6 +3902,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i8>, <1 x i8>* %x %red = call i8 @llvm.vector.reduce.smax.v1i8(<1 x i8> %v) @@ -3728,6 +3922,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x %red = call i8 @llvm.vector.reduce.smax.v2i8(<2 x i8> %v) @@ -3747,6 +3942,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x 
i8>* %x %red = call i8 @llvm.vector.reduce.smax.v4i8(<4 x i8> %v) @@ -3766,6 +3962,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x %red = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %v) @@ -3785,6 +3982,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x %red = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %v) @@ -3805,6 +4003,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x %red = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %v) @@ -3825,6 +4024,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x %red = call i8 @llvm.vector.reduce.smax.v64i8(<64 x i8> %v) @@ -3845,6 +4045,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i8>, <128 x i8>* %x %red = call i8 @llvm.vector.reduce.smax.v128i8(<128 x i8> %v) @@ -3868,6 +4069,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <256 x i8>, <256 x i8>* %x %red = call i8 @llvm.vector.reduce.smax.v256i8(<256 x i8> %v) @@ -3882,6 +4084,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i16>, <1 x i16>* %x %red = call i16 @llvm.vector.reduce.smax.v1i16(<1 x i16> %v) @@ -3901,6 +4104,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x %red = call i16 @llvm.vector.reduce.smax.v2i16(<2 x i16> %v) @@ -3920,6 +4124,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x %red = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %v) @@ -3939,6 +4144,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i16>, <8 x i16>* %x %red = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %v) @@ -3958,6 +4164,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x %red = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %v) @@ -3978,6 +4185,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x %red = call i16 @llvm.vector.reduce.smax.v32i16(<32 x i16> %v) @@ -3998,6 +4206,7 @@ ; CHECK-NEXT: vsetvli zero, 
a1, e16, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i16>, <64 x i16>* %x %red = call i16 @llvm.vector.reduce.smax.v64i16(<64 x i16> %v) @@ -4021,6 +4230,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i16>, <128 x i16>* %x %red = call i16 @llvm.vector.reduce.smax.v128i16(<128 x i16> %v) @@ -4035,6 +4245,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i32>, <1 x i32>* %x %red = call i32 @llvm.vector.reduce.smax.v1i32(<1 x i32> %v) @@ -4054,6 +4265,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x %red = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %v) @@ -4073,6 +4285,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %v) @@ -4092,6 +4305,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x %red = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %v) @@ -4111,6 +4325,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %v) @@ -4131,6 +4346,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.smax.v32i32(<32 x i32> %v) @@ -4154,6 +4370,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.smax.v64i32(<64 x i32> %v) @@ -4171,6 +4388,7 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smax_v1i64: @@ -4178,6 +4396,7 @@ ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x %red = call i64 @llvm.vector.reduce.smax.v1i64(<1 x i64> %v) @@ -4207,6 +4426,7 @@ ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smax_v2i64: @@ -4220,6 +4440,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredmax.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <2 x i64>, <2 x i64>* %x %red = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %v) @@ -4249,6 +4470,7 @@ ; RV32-NEXT: vsrl.vx v8, 
v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smax_v4i64: @@ -4262,6 +4484,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredmax.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <4 x i64>, <4 x i64>* %x %red = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %v) @@ -4291,6 +4514,7 @@ ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smax_v8i64: @@ -4304,6 +4528,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredmax.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <8 x i64>, <8 x i64>* %x %red = call i64 @llvm.vector.reduce.smax.v8i64(<8 x i64> %v) @@ -4333,6 +4558,7 @@ ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smax_v16i64: @@ -4346,6 +4572,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmax.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <16 x i64>, <16 x i64>* %x %red = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> %v) @@ -4378,6 +4605,7 @@ ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_smax_v32i64: @@ -4394,6 +4622,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmax.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x %red = call i64 @llvm.vector.reduce.smax.v32i64(<32 x i64> %v) @@ -4467,6 +4696,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i8>, <1 x i8>* %x %red = call i8 @llvm.vector.reduce.umin.v1i8(<1 x i8> %v) @@ -4485,6 +4715,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x %red = call i8 @llvm.vector.reduce.umin.v2i8(<2 x i8> %v) @@ -4503,6 +4734,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x %red = call i8 @llvm.vector.reduce.umin.v4i8(<4 x i8> %v) @@ -4521,6 +4753,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x %red = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %v) @@ -4539,6 +4772,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x %red = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %v) @@ -4558,6 +4792,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x %red = call i8 
@llvm.vector.reduce.umin.v32i8(<32 x i8> %v) @@ -4577,6 +4812,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x %red = call i8 @llvm.vector.reduce.umin.v64i8(<64 x i8> %v) @@ -4596,6 +4832,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i8>, <128 x i8>* %x %red = call i8 @llvm.vector.reduce.umin.v128i8(<128 x i8> %v) @@ -4618,6 +4855,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <256 x i8>, <256 x i8>* %x %red = call i8 @llvm.vector.reduce.umin.v256i8(<256 x i8> %v) @@ -4632,6 +4870,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i16>, <1 x i16>* %x %red = call i16 @llvm.vector.reduce.umin.v1i16(<1 x i16> %v) @@ -4650,6 +4889,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x %red = call i16 @llvm.vector.reduce.umin.v2i16(<2 x i16> %v) @@ -4668,6 +4908,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x %red = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %v) @@ -4686,6 +4927,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i16>, <8 x i16>* %x %red = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %v) @@ -4704,6 +4946,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x %red = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %v) @@ -4723,6 +4966,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x %red = call i16 @llvm.vector.reduce.umin.v32i16(<32 x i16> %v) @@ -4742,6 +4986,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i16>, <64 x i16>* %x %red = call i16 @llvm.vector.reduce.umin.v64i16(<64 x i16> %v) @@ -4764,6 +5009,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i16>, <128 x i16>* %x %red = call i16 @llvm.vector.reduce.umin.v128i16(<128 x i16> %v) @@ -4778,6 +5024,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i32>, <1 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v1i32(<1 x i32> %v) @@ -4796,6 +5043,7 @@ ; CHECK-NEXT: 
vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %v) @@ -4814,6 +5062,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %v) @@ -4832,6 +5081,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %v) @@ -4850,6 +5100,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %v) @@ -4869,6 +5120,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v32i32(<32 x i32> %v) @@ -4891,6 +5143,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.umin.v64i32(<64 x i32> %v) @@ -4908,6 +5161,7 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v1i64: @@ -4915,6 +5169,7 @@ ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x %red = call i64 @llvm.vector.reduce.umin.v1i64(<1 x i64> %v) @@ -4937,6 +5192,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v2i64: @@ -4948,6 +5204,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredminu.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <2 x i64>, <2 x i64>* %x %red = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %v) @@ -4970,6 +5227,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v4i64: @@ -4981,6 +5239,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredminu.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <4 x i64>, <4 x i64>* %x %red = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %v) @@ -5003,6 +5262,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v8i64: @@ -5014,6 +5274,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredminu.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: 
.cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <8 x i64>, <8 x i64>* %x %red = call i64 @llvm.vector.reduce.umin.v8i64(<8 x i64> %v) @@ -5036,6 +5297,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v16i64: @@ -5047,6 +5309,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredminu.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <16 x i64>, <16 x i64>* %x %red = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> %v) @@ -5072,6 +5335,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v32i64: @@ -5086,6 +5350,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredminu.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x %red = call i64 @llvm.vector.reduce.umin.v32i64(<32 x i64> %v) @@ -5151,6 +5416,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i8>, <1 x i8>* %x %red = call i8 @llvm.vector.reduce.umax.v1i8(<1 x i8> %v) @@ -5169,6 +5435,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x %red = call i8 @llvm.vector.reduce.umax.v2i8(<2 x i8> %v) @@ -5187,6 +5454,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x %red = call i8 @llvm.vector.reduce.umax.v4i8(<4 x i8> %v) @@ -5205,6 +5473,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x %red = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %v) @@ -5223,6 +5492,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x %red = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %v) @@ -5242,6 +5512,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x %red = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %v) @@ -5261,6 +5532,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x %red = call i8 @llvm.vector.reduce.umax.v64i8(<64 x i8> %v) @@ -5280,6 +5552,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i8>, <128 x i8>* %x %red = call i8 @llvm.vector.reduce.umax.v128i8(<128 x i8> %v) @@ -5302,6 +5575,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <256 x i8>, <256 x i8>* %x %red = call i8 @llvm.vector.reduce.umax.v256i8(<256 x i8> %v) @@ -5316,6 +5590,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i16>, <1 x i16>* %x %red = call i16 @llvm.vector.reduce.umax.v1i16(<1 x i16> %v) @@ -5334,6 +5609,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x %red = call i16 @llvm.vector.reduce.umax.v2i16(<2 x i16> %v) @@ -5352,6 +5628,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x %red = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %v) @@ -5370,6 +5647,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i16>, <8 x i16>* %x %red = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %v) @@ -5388,6 +5666,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x %red = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %v) @@ -5407,6 +5686,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x %red = call i16 @llvm.vector.reduce.umax.v32i16(<32 x i16> %v) @@ -5426,6 +5706,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i16>, <64 x i16>* %x %red = call i16 @llvm.vector.reduce.umax.v64i16(<64 x i16> %v) @@ -5448,6 +5729,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <128 x i16>, <128 x i16>* %x %red = call i16 @llvm.vector.reduce.umax.v128i16(<128 x i16> %v) @@ -5462,6 +5744,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <1 x i32>, <1 x i32>* %x %red = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> %v) @@ -5480,6 +5763,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x %red = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %v) @@ -5498,6 +5782,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x %red = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %v) @@ -5516,6 +5801,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* 
%x %red = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %v) @@ -5534,6 +5820,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x %red = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %v) @@ -5553,6 +5840,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <32 x i32>, <32 x i32>* %x %red = call i32 @llvm.vector.reduce.umax.v32i32(<32 x i32> %v) @@ -5575,6 +5863,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load <64 x i32>, <64 x i32>* %x %red = call i32 @llvm.vector.reduce.umax.v64i32(<64 x i32> %v) @@ -5592,6 +5881,7 @@ ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v1i64: @@ -5599,6 +5889,7 @@ ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <1 x i64>, <1 x i64>* %x %red = call i64 @llvm.vector.reduce.umax.v1i64(<1 x i64> %v) @@ -5621,6 +5912,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v2i64: @@ -5632,6 +5924,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredmaxu.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <2 x i64>, <2 x i64>* %x %red = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %v) @@ -5654,6 +5947,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v4i64: @@ -5665,6 +5959,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredmaxu.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <4 x i64>, <4 x i64>* %x %red = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %v) @@ -5687,6 +5982,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v8i64: @@ -5698,6 +5994,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredmaxu.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <8 x i64>, <8 x i64>* %x %red = call i64 @llvm.vector.reduce.umax.v8i64(<8 x i64> %v) @@ -5720,6 +6017,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v16i64: @@ -5731,6 +6029,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmaxu.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <16 x i64>, <16 x i64>* %x %red = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> %v) @@ -5756,6 +6055,7 @@ ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 
+; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v32i64: @@ -5770,6 +6070,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmaxu.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x %red = call i64 @llvm.vector.reduce.umax.v32i64(<32 x i64> %v) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl) ret i1 %r @@ -34,6 +35,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.or.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl) ret i1 %r @@ -51,6 +53,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.xor.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl) ret i1 %r @@ -68,6 +71,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl) ret i1 %r @@ -86,6 +90,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.or.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl) ret i1 %r @@ -103,6 +108,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.xor.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl) ret i1 %r @@ -120,6 +126,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl) ret i1 %r @@ -138,6 +145,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.or.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl) ret i1 %r @@ -155,6 +163,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.xor.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl) ret i1 %r @@ -172,6 +181,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl) ret i1 %r @@ -190,6 +200,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.or.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl) ret i1 %r @@ -207,6 +218,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 
@llvm.vp.reduce.xor.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl) ret i1 %r @@ -224,6 +236,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl) ret i1 %r @@ -242,6 +255,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.or.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl) ret i1 %r @@ -259,6 +273,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.xor.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl) ret i1 %r diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x half> %a, <2 x half> %b ret <2 x half> %v @@ -26,6 +27,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b %v = select i1 %cmp, <2 x half> %c, <2 x half> %d @@ -40,6 +42,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x half> %a, <4 x half> %b ret <4 x half> %v @@ -54,6 +57,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b %v = select i1 %cmp, <4 x half> %c, <4 x half> %d @@ -68,6 +72,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x half> %a, <8 x half> %b ret <8 x half> %v @@ -82,6 +87,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b %v = select i1 %cmp, <8 x half> %c, <8 x half> %d @@ -96,6 +102,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x half> %a, <16 x half> %b ret <16 x half> %v @@ -110,6 +117,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b %v = select i1 %cmp, <16 x half> %c, <16 x half> %d @@ -124,6 +132,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x float> %a, <2 x float> %b ret <2 x float> %v @@ -138,6 +147,7 @@ ; CHECK-NEXT: vmsne.vi 
v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b %v = select i1 %cmp, <2 x float> %c, <2 x float> %d @@ -152,6 +162,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x float> %a, <4 x float> %b ret <4 x float> %v @@ -166,6 +177,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b %v = select i1 %cmp, <4 x float> %c, <4 x float> %d @@ -180,6 +192,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x float> %a, <8 x float> %b ret <8 x float> %v @@ -194,6 +207,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b %v = select i1 %cmp, <8 x float> %c, <8 x float> %d @@ -208,6 +222,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x float> %a, <16 x float> %b ret <16 x float> %v @@ -222,6 +237,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b %v = select i1 %cmp, <16 x float> %c, <16 x float> %d @@ -236,6 +252,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x double> %a, <2 x double> %b ret <2 x double> %v @@ -250,6 +267,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b %v = select i1 %cmp, <2 x double> %c, <2 x double> %d @@ -264,6 +282,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x double> %a, <4 x double> %b ret <4 x double> %v @@ -278,6 +297,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b %v = select i1 %cmp, <4 x double> %c, <4 x double> %d @@ -292,6 +312,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x double> %a, <8 x double> %b ret <8 x double> %v @@ -306,6 +327,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b %v = select i1 %cmp, <8 x double> %c, <8 x double> 
%d @@ -320,6 +342,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x double> %a, <16 x double> %b ret <16 x double> %v @@ -334,6 +357,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b %v = select i1 %cmp, <16 x double> %c, <16 x double> %d diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <1 x i1> %a, <1 x i1> %b ret <1 x i1> %v @@ -29,6 +30,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = select i1 %cmp, <1 x i1> %c, <1 x i1> %d @@ -44,6 +46,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x i1> %a, <2 x i1> %b ret <2 x i1> %v @@ -60,6 +63,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = select i1 %cmp, <2 x i1> %c, <2 x i1> %d @@ -75,6 +79,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x i1> %a, <4 x i1> %b ret <4 x i1> %v @@ -91,6 +96,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = select i1 %cmp, <4 x i1> %c, <4 x i1> %d @@ -106,6 +112,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x i1> %a, <8 x i1> %b ret <8 x i1> %v @@ -122,6 +129,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = select i1 %cmp, <8 x i1> %c, <8 x i1> %d @@ -137,6 +145,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x i1> %a, <16 x i1> %b ret <16 x i1> %v @@ -153,6 +162,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = select i1 %cmp, <16 x i1> %c, <16 x i1> %d @@ -166,6 +176,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x i8> %a, <2 x i8> %b ret <2 x i8> %v @@ -180,6 +191,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; 
CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i8 %a, %b %v = select i1 %cmp, <2 x i8> %c, <2 x i8> %d @@ -193,6 +205,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x i8> %a, <4 x i8> %b ret <4 x i8> %v @@ -207,6 +220,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i8 %a, %b %v = select i1 %cmp, <4 x i8> %c, <4 x i8> %d @@ -220,6 +234,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x i8> %a, <8 x i8> %b ret <8 x i8> %v @@ -234,6 +249,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i8 %a, %b %v = select i1 %cmp, <8 x i8> %c, <8 x i8> %d @@ -247,6 +263,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x i8> %a, <16 x i8> %b ret <16 x i8> %v @@ -261,6 +278,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i8 %a, %b %v = select i1 %cmp, <16 x i8> %c, <16 x i8> %d @@ -275,6 +293,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x i16> %a, <2 x i16> %b ret <2 x i16> %v @@ -290,6 +309,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b %v = select i1 %cmp, <2 x i16> %c, <2 x i16> %d @@ -304,6 +324,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x i16> %a, <4 x i16> %b ret <4 x i16> %v @@ -319,6 +340,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b %v = select i1 %cmp, <4 x i16> %c, <4 x i16> %d @@ -333,6 +355,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x i16> %a, <8 x i16> %b ret <8 x i16> %v @@ -348,6 +371,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b %v = select i1 %cmp, <8 x i16> %c, <8 x i16> %d @@ -362,6 +386,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x i16> %a, <16 x i16> %b ret <16 x i16> %v @@ -377,6 
+402,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b %v = select i1 %cmp, <16 x i16> %c, <16 x i16> %d @@ -391,6 +417,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x i32> %a, <2 x i32> %b ret <2 x i32> %v @@ -406,6 +433,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b %v = select i1 %cmp, <2 x i32> %c, <2 x i32> %d @@ -420,6 +448,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x i32> %a, <4 x i32> %b ret <4 x i32> %v @@ -435,6 +464,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b %v = select i1 %cmp, <4 x i32> %c, <4 x i32> %d @@ -449,6 +479,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x i32> %a, <8 x i32> %b ret <8 x i32> %v @@ -464,6 +495,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b %v = select i1 %cmp, <8 x i32> %c, <8 x i32> %d @@ -478,6 +510,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x i32> %a, <16 x i32> %b ret <16 x i32> %v @@ -493,6 +526,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b %v = select i1 %cmp, <16 x i32> %c, <16 x i32> %d @@ -507,6 +541,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x i64> %a, <2 x i64> %b ret <2 x i64> %v @@ -524,6 +559,7 @@ ; RV32-NEXT: vmsne.vi v0, v10, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vmerge.vvm v8, v9, v8, v0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: selectcc_v2i64: @@ -535,6 +571,7 @@ ; RV64-NEXT: vmsne.vi v0, v10, 0 ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vmerge.vvm v8, v9, v8, v0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b %v = select i1 %cmp, <2 x i64> %c, <2 x i64> %d @@ -549,6 +586,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x i64> %a, <4 x i64> %b ret <4 x i64> %v @@ -566,6 +604,7 @@ ; RV32-NEXT: vmsne.vi v0, v12, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: 
vmerge.vvm v8, v10, v8, v0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: selectcc_v4i64: @@ -577,6 +616,7 @@ ; RV64-NEXT: vmsne.vi v0, v12, 0 ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vmerge.vvm v8, v10, v8, v0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b %v = select i1 %cmp, <4 x i64> %c, <4 x i64> %d @@ -591,6 +631,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x i64> %a, <8 x i64> %b ret <8 x i64> %v @@ -608,6 +649,7 @@ ; RV32-NEXT: vmsne.vi v0, v16, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vmerge.vvm v8, v12, v8, v0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: selectcc_v8i64: @@ -619,6 +661,7 @@ ; RV64-NEXT: vmsne.vi v0, v16, 0 ; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV64-NEXT: vmerge.vvm v8, v12, v8, v0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b %v = select i1 %cmp, <8 x i64> %c, <8 x i64> %d @@ -633,6 +676,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x i64> %a, <16 x i64> %b ret <16 x i64> %v @@ -650,6 +694,7 @@ ; RV32-NEXT: vmsne.vi v0, v24, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vmerge.vvm v8, v16, v8, v0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: selectcc_v16i64: @@ -661,6 +706,7 @@ ; RV64-NEXT: vmsne.vi v0, v24, 0 ; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV64-NEXT: vmerge.vvm v8, v16, v8, v0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b %v = select i1 %cmp, <16 x i64> %c, <16 x i64> %d diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv32.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.experimental.stepvector.v2i8() ret <2 x i8> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.experimental.stepvector.v4i8() ret <4 x i8> %v @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.experimental.stepvector.v8i8() ret <8 x i8> %v @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.experimental.stepvector.v16i8() ret <16 x i8> %v @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.experimental.stepvector.v2i16() ret <2 x i16> %v @@ -69,6 +74,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> 
@llvm.experimental.stepvector.v4i16() ret <4 x i16> %v @@ -81,6 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.experimental.stepvector.v8i16() ret <8 x i16> %v @@ -94,12 +101,14 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 8 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v16i16: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-NEXT: vid.v v8 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret %v = call <16 x i16> @llvm.experimental.stepvector.v16i16() ret <16 x i16> %v @@ -112,6 +121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.experimental.stepvector.v2i32() ret <2 x i32> %v @@ -124,6 +134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.experimental.stepvector.v4i32() ret <4 x i32> %v @@ -137,12 +148,14 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 4 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v8i32: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vid.v v8 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret %v = call <8 x i32> @llvm.experimental.stepvector.v8i32() ret <8 x i32> %v @@ -158,6 +171,7 @@ ; LMULMAX1-NEXT: vadd.vi v9, v8, 4 ; LMULMAX1-NEXT: vadd.vi v10, v8, 8 ; LMULMAX1-NEXT: vadd.vi v11, v8, 12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v16i32: @@ -165,6 +179,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vid.v v8 ; LMULMAX2-NEXT: vadd.vi v10, v8, 8 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret %v = call <16 x i32> @llvm.experimental.stepvector.v16i32() ret <16 x i32> %v diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.experimental.stepvector.v2i8() ret <2 x i8> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.experimental.stepvector.v4i8() ret <4 x i8> %v @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.experimental.stepvector.v8i8() ret <8 x i8> %v @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.experimental.stepvector.v16i8() ret <16 x i8> %v @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.experimental.stepvector.v2i16() ret <2 x i16> %v @@ -69,6 +74,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.experimental.stepvector.v4i16() ret <4 x i16> %v @@ -81,6 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.experimental.stepvector.v8i16() ret <8 x i16> %v @@ -94,12 +101,14 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 8 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v16i16: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-NEXT: vid.v v8 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret %v = call <16 x i16> @llvm.experimental.stepvector.v16i16() ret <16 x i16> %v @@ -112,6 +121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.experimental.stepvector.v2i32() ret <2 x i32> %v @@ -124,6 +134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.experimental.stepvector.v4i32() ret <4 x i32> %v @@ -137,12 +148,14 @@ ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 4 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v8i32: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vid.v v8 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret %v = call <8 x i32> @llvm.experimental.stepvector.v8i32() ret <8 x i32> %v @@ -158,6 +171,7 @@ ; LMULMAX1-NEXT: vadd.vi v9, v8, 4 ; LMULMAX1-NEXT: vadd.vi v10, v8, 8 ; LMULMAX1-NEXT: vadd.vi v11, v8, 12 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v16i32: @@ -165,6 +179,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-NEXT: vid.v v8 ; LMULMAX2-NEXT: vadd.vi v10, v8, 8 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret %v = call <16 x i32> @llvm.experimental.stepvector.v16i32() ret <16 x i32> %v @@ -177,6 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.experimental.stepvector.v2i64() ret <2 x i64> %v @@ -190,12 +206,14 @@ ; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 2 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v4i64: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-NEXT: vid.v v8 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret %v = call <4 x i64> @llvm.experimental.stepvector.v4i64() ret <4 x i64> %v @@ -211,6 +229,7 @@ ; LMULMAX1-NEXT: vadd.vi v9, v8, 2 ; LMULMAX1-NEXT: vadd.vi v10, v8, 4 ; LMULMAX1-NEXT: vadd.vi v11, v8, 6 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v8i64: @@ -218,6 +237,7 @@ ; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-NEXT: vid.v v8 ; LMULMAX2-NEXT: 
vadd.vi v10, v8, 4 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret %v = call <8 x i64> @llvm.experimental.stepvector.v8i64() ret <8 x i64> %v @@ -237,6 +257,7 @@ ; LMULMAX1-NEXT: vadd.vi v13, v8, 10 ; LMULMAX1-NEXT: vadd.vi v14, v8, 12 ; LMULMAX1-NEXT: vadd.vi v15, v8, 14 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v16i64: @@ -246,6 +267,7 @@ ; LMULMAX2-NEXT: vadd.vi v10, v8, 4 ; LMULMAX2-NEXT: vadd.vi v12, v8, 8 ; LMULMAX2-NEXT: vadd.vi v14, v8, 12 +; LMULMAX2-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX2-NEXT: ret %v = call <16 x i64> @llvm.experimental.stepvector.v16i64() ret <16 x i64> %v diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -9,12 +9,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV32-NEXT: vle8.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: load_v4i32_align1: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vle8.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %z = load <4 x i32>, <4 x i32>* %ptr, align 1 ret <4 x i32> %z @@ -25,12 +27,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV32-NEXT: vle8.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: load_v4i32_align2: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vle8.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %z = load <4 x i32>, <4 x i32>* %ptr, align 2 ret <4 x i32> %z @@ -41,12 +45,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV32-NEXT: vse8.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: store_v4i32_align1: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vse8.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret store <4 x i32> %x, <4 x i32>* %ptr, align 1 ret void @@ -57,12 +63,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV32-NEXT: vse8.v v8, (a0) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: store_v4i32_align2: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64-NEXT: vse8.v v8, (a0) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret store <4 x i32> %x, <4 x i32>* %ptr, align 2 ret void @@ -116,6 +124,7 @@ ; RV32-NEXT: .LBB4_4: # %else2 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i16_align1: @@ -163,6 +172,7 @@ ; RV64-NEXT: .LBB4_4: # %else2 ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, i32 1, <2 x i1> %m, <2 x i16> %passthru) ret <2 x i16> %v @@ -216,6 +226,7 @@ ; RV32-NEXT: .LBB5_4: # %else2 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i64_align4: @@ -263,6 +274,7 @@ ; RV64-NEXT: .LBB5_4: # %else2 ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> %ptrs, i32 4, <2 x i1> %m, <2 x i64> %passthru) ret <2 x i64> %v @@ -300,6 +312,7 @@ ; RV32-NEXT: bnez a0, .LBB6_8 ; 
RV32-NEXT: .LBB6_4: # %else6 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB6_5: # %cond.store ; RV32-NEXT: vsetivli zero, 0, e16, mf2, ta, mu @@ -346,6 +359,7 @@ ; RV32-NEXT: srli a0, a0, 8 ; RV32-NEXT: sb a0, 1(a1) ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4i16_align1: @@ -377,6 +391,7 @@ ; RV64-NEXT: bnez a0, .LBB6_8 ; RV64-NEXT: .LBB6_4: # %else6 ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB6_5: # %cond.store ; RV64-NEXT: vsetivli zero, 0, e16, mf2, ta, mu @@ -423,6 +438,7 @@ ; RV64-NEXT: srli a0, a0, 8 ; RV64-NEXT: sb a0, 1(a1) ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v4i16.v4p0i16(<4 x i16> %val, <4 x i16*> %ptrs, i32 1, <4 x i1> %m) ret void @@ -454,6 +470,7 @@ ; RV32-NEXT: bnez a0, .LBB7_4 ; RV32-NEXT: .LBB7_2: # %else2 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB7_3: # %cond.store ; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, mu @@ -474,6 +491,7 @@ ; RV32-NEXT: srli a0, a0, 16 ; RV32-NEXT: sh a0, 2(a1) ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v2i32_align2: @@ -499,6 +517,7 @@ ; RV64-NEXT: bnez a0, .LBB7_4 ; RV64-NEXT: .LBB7_2: # %else2 ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB7_3: # %cond.store ; RV64-NEXT: vsetivli zero, 0, e32, mf2, ta, mu @@ -521,6 +540,7 @@ ; RV64-NEXT: srli a0, a0, 16 ; RV64-NEXT: sh a0, 2(a1) ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.v2i32.v2p0i32(<2 x i32> %val, <2 x i32*> %ptrs, i32 2, <2 x i1> %m) ret void diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.add.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> 
undef, <2 x i32> zeroinitializer @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 -1, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 -1, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -99,6 +106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -109,6 +117,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -121,6 +130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -133,6 +143,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -147,6 +158,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 -1, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 -1, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -175,6 +188,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl) ret <5 x i8> %v @@ -185,6 +199,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <5 x i1> undef, i1 true, i32 0 %m = shufflevector <5 x i1> %head, <5 x i1> undef, <5 x i32> zeroinitializer @@ -197,6 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <5 x i8> undef, i8 %b, i32 0 %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer @@ -209,6 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <5 x i8> undef, i8 %b, i32 0 %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer @@ -223,6 +240,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <5 x i8> undef, i8 -1, i32 0 %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer @@ -235,6 +253,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <5 x i8> undef, i8 -1, i32 0 %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer @@ -251,6 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -261,6 +281,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -285,6 +307,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -299,6 +322,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 -1, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -311,6 +335,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 -1, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -327,6 +352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -337,6 +363,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -349,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> 
zeroinitializer @@ -361,6 +389,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -375,6 +404,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 -1, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -387,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 -1, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -421,6 +452,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <256 x i8> undef, i8 -1, i32 0 %vb = shufflevector <256 x i8> %elt.head, <256 x i8> undef, <256 x i32> zeroinitializer @@ -446,6 +478,7 @@ ; CHECK-NEXT: .LBB32_4: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <256 x i8> undef, i8 -1, i32 0 %vb = shufflevector <256 x i8> %elt.head, <256 x i8> undef, <256 x i32> zeroinitializer @@ -467,6 +500,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <256 x i8> undef, i8 -1, i32 0 %vb = shufflevector <256 x i8> %elt.head, <256 x i8> undef, <256 x i32> zeroinitializer @@ -482,6 +516,7 @@ ; CHECK-NEXT: addi a0, zero, 128 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <256 x i8> undef, i8 -1, i32 0 %vb = shufflevector <256 x i8> %elt.head, <256 x i8> undef, <256 x i32> zeroinitializer @@ -496,6 +531,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -506,6 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -518,6 +555,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -530,6 +568,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -544,6 +583,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 -1, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -556,6 +596,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 -1, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -572,6 +613,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -582,6 +624,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -594,6 +637,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -606,6 +650,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -620,6 +665,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 -1, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -632,6 +678,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 -1, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -648,6 +695,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -658,6 +706,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -670,6 +719,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -682,6 +732,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 
; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -696,6 +747,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 -1, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -708,6 +760,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 -1, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -724,6 +777,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -734,6 +788,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -746,6 +801,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -758,6 +814,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -772,6 +829,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 -1, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -784,6 +842,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 -1, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -800,6 +859,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -810,6 +870,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -822,6 +883,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> 
%elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -834,6 +896,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -848,6 +911,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 -1, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -860,6 +924,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 -1, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -876,6 +941,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -886,6 +952,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -898,6 +965,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -910,6 +978,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -924,6 +993,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 -1, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -936,6 +1006,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 -1, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -952,6 +1023,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -962,6 +1034,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -974,6 +1047,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, 
mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -986,6 +1060,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -1000,6 +1075,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 -1, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -1012,6 +1088,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 -1, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -1028,6 +1105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -1038,6 +1116,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -1050,6 +1129,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -1062,6 +1142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -1076,6 +1157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 -1, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -1088,6 +1170,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 -1, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -1104,6 +1187,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -1114,6 +1198,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -1134,12 +1219,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1160,12 +1247,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1180,6 +1269,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 -1, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1192,6 +1282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 -1, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1208,6 +1299,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -1218,6 +1310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1238,12 +1331,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1264,12 +1359,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1284,6 +1381,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: 
vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 -1, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1296,6 +1394,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 -1, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1312,6 +1411,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -1322,6 +1422,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -1342,12 +1443,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1368,12 +1471,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1388,6 +1493,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 -1, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1400,6 +1506,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 -1, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1416,6 +1523,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -1426,6 +1534,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -1446,12 +1555,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 
16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1472,12 +1583,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1492,6 +1605,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 -1, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1504,6 +1618,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 -1, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1542,6 +1657,7 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vadd.vv v8, v8, v24, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v32i64: @@ -1565,6 +1681,7 @@ ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vadd.vi v8, v8, -1, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <32 x i64> undef, i64 -1, i32 0 %vb = shufflevector <32 x i64> %elt.head, <32 x i64> undef, <32 x i32> zeroinitializer @@ -1593,6 +1710,7 @@ ; RV32-NEXT: .LBB108_4: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v24 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vi_v32i64_unmasked: @@ -1612,6 +1730,7 @@ ; RV64-NEXT: .LBB108_4: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vadd.vi v8, v8, -1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <32 x i64> undef, i64 -1, i32 0 %vb = shufflevector <32 x i64> %elt.head, <32 x i64> undef, <32 x i32> zeroinitializer @@ -1631,12 +1750,14 @@ ; RV32-NEXT: vmv.v.i v16, -1 ; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v32i64_evl12: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, mu ; RV64-NEXT: vadd.vi v8, v8, -1, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <32 x i64> undef, i64 -1, i32 0 %vb = shufflevector <32 x i64> %elt.head, <32 x i64> undef, <32 x i32> zeroinitializer @@ -1657,6 +1778,7 @@ ; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: vadd.vv v16, v16, v24, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v32i64_evl27: @@ -1668,6 +1790,7 @@ ; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, mu ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vadd.vi v16, v16, -1, 
v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <32 x i64> undef, i64 -1, i32 0 %vb = shufflevector <32 x i64> %elt.head, <32 x i64> undef, <32 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.and.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 4, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 4, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -99,6 +106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -109,6 +117,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -121,6 +130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -133,6 +143,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: 
vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -147,6 +158,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 4, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 4, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -175,6 +188,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -185,6 +199,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -197,6 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -209,6 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -223,6 +240,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 4, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -235,6 +253,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 4, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -251,6 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -261,6 +281,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> 
undef, <16 x i32> zeroinitializer @@ -285,6 +307,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -299,6 +322,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 4, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -311,6 +335,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 4, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -327,6 +352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -337,6 +363,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -349,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -361,6 +389,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -375,6 +404,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 4, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -387,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 4, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -403,6 +434,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -413,6 +445,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -425,6 +458,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, 
a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -437,6 +471,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -451,6 +486,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 4, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -463,6 +499,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 4, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -479,6 +516,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -489,6 +527,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -501,6 +540,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -513,6 +553,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -527,6 +568,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 4, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -539,6 +581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 4, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -555,6 +598,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -565,6 +609,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = 
shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -577,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -589,6 +635,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -603,6 +650,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 4, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -615,6 +663,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 4, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -631,6 +680,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -641,6 +691,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -653,6 +704,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -665,6 +717,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -679,6 +732,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 4, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -691,6 +745,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 4, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -707,6 +762,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -717,6 +773,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -729,6 +786,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -741,6 +799,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -755,6 +814,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 4, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -767,6 +827,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 4, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -783,6 +844,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -793,6 +855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -805,6 +868,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -817,6 +881,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -831,6 +896,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 4, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -843,6 +909,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 4, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -859,6 +926,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -869,6 +937,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -881,6 +950,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -893,6 +963,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -907,6 +978,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 4, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -919,6 +991,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 4, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -935,6 +1008,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -945,6 +1019,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -965,12 +1040,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vand.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -991,12 +1068,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1011,6 +1090,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = 
insertelement <2 x i64> undef, i64 4, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1023,6 +1103,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 4, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1039,6 +1120,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -1049,6 +1131,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1069,12 +1152,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vand.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1095,12 +1180,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1115,6 +1202,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 4, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1127,6 +1215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 4, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1143,6 +1232,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -1153,6 +1243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -1173,12 +1264,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vand.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v8i64: ; RV64: # %bb.0: ; 
RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1199,12 +1292,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1219,6 +1314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 4, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1231,6 +1327,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 4, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1247,6 +1344,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 %evl) ret <11 x i64> %v @@ -1257,6 +1355,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <11 x i1> undef, i1 true, i32 0 %m = shufflevector <11 x i1> %head, <11 x i1> undef, <11 x i32> zeroinitializer @@ -1280,12 +1379,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v16 ; RV32-NEXT: vand.vv v8, v8, v24, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v11i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <11 x i64> undef, i64 %b, i32 0 %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer @@ -1307,12 +1408,14 @@ ; RV32-NEXT: vmerge.vxm v16, v16, a0, v0 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v11i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <11 x i64> undef, i64 %b, i32 0 %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer @@ -1327,6 +1430,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <11 x i64> undef, i64 4, i32 0 %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer @@ -1339,6 +1443,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <11 x i64> undef, i64 4, i32 0 %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer @@ -1355,6 +1460,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -1365,6 +1471,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -1385,12 +1492,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vand.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1411,12 +1520,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1431,6 +1542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 4, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1443,6 +1555,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 4, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.sdiv.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -28,6 +29,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.sdiv.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -38,6 +40,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> 
%head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -50,6 +53,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -62,6 +66,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -78,6 +83,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.sdiv.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -88,6 +94,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -100,6 +107,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -112,6 +120,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -128,6 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <6 x i8> @llvm.vp.sdiv.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl) ret <6 x i8> %v @@ -140,6 +150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.sdiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -150,6 +161,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -162,6 +174,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -174,6 +187,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -190,6 +204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.sdiv.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -200,6 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -212,6 +228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -224,6 +241,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -240,6 +258,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.sdiv.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -250,6 +269,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -262,6 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -274,6 +295,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -290,6 +312,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.sdiv.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -300,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -312,6 +336,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -324,6 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -340,6 +366,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.sdiv.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -350,6 +377,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -362,6 +390,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -374,6 +403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -390,6 +420,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.sdiv.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -400,6 +431,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -412,6 +444,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -424,6 +457,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -440,6 +474,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.sdiv.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -450,6 +485,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -462,6 +498,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -474,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
%elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -490,6 +528,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -500,6 +539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -512,6 +552,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -524,6 +565,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -540,6 +582,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.sdiv.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -550,6 +593,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -562,6 +606,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -574,6 +619,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -590,6 +636,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.sdiv.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -600,6 +647,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -612,6 +660,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -624,6 +673,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -640,6 +690,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.sdiv.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -650,6 +701,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -670,12 +722,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -696,12 +750,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -718,6 +774,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.sdiv.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -728,6 +785,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -748,12 +806,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -774,12 +834,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -796,6 
+858,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.sdiv.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -806,6 +869,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -826,12 +890,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -852,12 +918,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -874,6 +942,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.sdiv.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -884,6 +953,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -904,12 +974,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -930,12 +1002,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll @@ -15,6 +15,7 @@ ; CHECK-NEXT: vand.vx v8, v8, 
a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.udiv.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -27,6 +28,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.udiv.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -37,6 +39,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -49,6 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -61,6 +65,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -77,6 +82,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.udiv.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -87,6 +93,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -99,6 +106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -111,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -127,6 +136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <6 x i8> @llvm.vp.udiv.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl) ret <6 x i8> %v @@ -139,6 +149,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -149,6 +160,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ 
-161,6 +173,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -173,6 +186,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -189,6 +203,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.udiv.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -199,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -211,6 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -223,6 +240,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -239,6 +257,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.udiv.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -249,6 +268,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -261,6 +281,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -289,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.udiv.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -299,6 +322,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -311,6 +335,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -323,6 +348,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -339,6 +365,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.udiv.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -349,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -361,6 +389,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -373,6 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -389,6 +419,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.udiv.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -399,6 +430,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -411,6 +443,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -423,6 +456,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -439,6 +473,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.udiv.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x 
i32> %v @@ -449,6 +484,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -461,6 +497,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -473,6 +510,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -489,6 +527,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -499,6 +538,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -511,6 +551,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -523,6 +564,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -539,6 +581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.udiv.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -549,6 +592,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -561,6 +605,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -573,6 +618,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -589,6 +635,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.udiv.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -599,6 +646,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -611,6 +659,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -623,6 +672,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -639,6 +689,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.udiv.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -649,6 +700,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -669,12 +721,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -695,12 +749,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -717,6 +773,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.udiv.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -727,6 +784,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -747,12 +805,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; 
RV64-LABEL: vdivu_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -773,12 +833,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -795,6 +857,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.udiv.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -805,6 +868,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -825,12 +889,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -851,12 +917,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -873,6 +941,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.udiv.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -883,6 +952,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -903,12 +973,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> 
%elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -929,12 +1001,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) ret <2 x half> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -35,6 +37,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -63,6 +67,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <3 x half> @llvm.vp.fadd.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl) ret <3 x half> %v @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) ret <4 x half> %v @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -99,6 +106,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> undef, half %b, i32 0 %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -111,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> undef, half %b, i32 0 %vb = 
shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -127,6 +136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) ret <8 x half> %v @@ -137,6 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -151,6 +162,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -163,6 +175,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -179,6 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) ret <16 x half> %v @@ -189,6 +203,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -203,6 +218,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -215,6 +231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -231,6 +248,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) ret <2 x float> %v @@ -241,6 +259,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -255,6 +274,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> 
zeroinitializer @@ -267,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer @@ -283,6 +304,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) ret <4 x float> %v @@ -293,6 +315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -307,6 +330,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -319,6 +343,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -335,6 +360,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) ret <8 x float> %v @@ -345,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -359,6 +386,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer @@ -371,6 +399,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer @@ -387,6 +416,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) ret <16 x float> %v @@ -397,6 +427,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -411,6 +442,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -423,6 +455,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -439,6 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) ret <2 x double> %v @@ -449,6 +483,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -463,6 +498,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer @@ -475,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer @@ -491,6 +528,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) ret <4 x double> %v @@ -501,6 +539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -515,6 +554,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer @@ -527,6 +567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer @@ -543,6 +584,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) ret <8 x double> %v @@ -553,6 +595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -567,6 +610,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -579,6 +623,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -595,6 +640,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) ret <16 x double> %v @@ -605,6 +651,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -619,6 +666,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer @@ -631,6 +679,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) ret <2 x half> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -35,6 +37,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -63,6 +67,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <3 x half> @llvm.vp.fdiv.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl) ret <3 x half> %v @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) ret <4 x half> %v @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -99,6 +106,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> undef, half %b, i32 0 %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -111,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> undef, half %b, i32 0 %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -127,6 +136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) ret <8 x half> %v @@ -137,6 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -151,6 +162,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -163,6 +175,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -179,6 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) ret <16 x half> %v @@ -189,6 +203,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> 
%head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -203,6 +218,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -215,6 +231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -231,6 +248,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) ret <2 x float> %v @@ -241,6 +259,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -255,6 +274,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer @@ -267,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer @@ -283,6 +304,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) ret <4 x float> %v @@ -293,6 +315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -307,6 +330,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -319,6 +343,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -335,6 +360,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) ret <8 
x float> %v @@ -345,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -359,6 +386,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer @@ -371,6 +399,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer @@ -387,6 +416,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) ret <16 x float> %v @@ -397,6 +427,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -411,6 +442,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -423,6 +455,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -439,6 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) ret <2 x double> %v @@ -449,6 +483,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -463,6 +498,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer @@ -475,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> 
zeroinitializer @@ -491,6 +528,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) ret <4 x double> %v @@ -501,6 +539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -515,6 +554,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer @@ -527,6 +567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer @@ -543,6 +584,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) ret <8 x double> %v @@ -553,6 +595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -567,6 +610,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -579,6 +623,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -595,6 +640,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) ret <16 x double> %v @@ -605,6 +651,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -619,6 +666,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> 
zeroinitializer @@ -631,6 +679,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %b) ret <2 x half> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x half> undef, half %b, i32 0 %splat = shufflevector <2 x half> %head, <2 x half> undef, <2 x i32> zeroinitializer @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.maxnum.v4f16(<4 x half> %a, <4 x half> %b) ret <4 x half> %v @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x half> undef, half %b, i32 0 %splat = shufflevector <4 x half> %head, <4 x half> undef, <4 x i32> zeroinitializer @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x half> @llvm.maxnum.v8f16(<8 x half> %a, <8 x half> %b) ret <8 x half> %v @@ -69,6 +74,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x half> undef, half %b, i32 0 %splat = shufflevector <8 x half> %head, <8 x half> undef, <8 x i32> zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x half> @llvm.maxnum.v16f16(<16 x half> %a, <16 x half> %b) ret <16 x half> %v @@ -93,6 +100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x half> undef, half %b, i32 0 %splat = shufflevector <16 x half> %head, <16 x half> undef, <16 x i32> zeroinitializer @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %a, <2 x float> %b) ret <2 x float> %v @@ -117,6 +126,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x float> undef, float %b, i32 0 %splat = shufflevector <2 x float> %head, <2 x float> undef, <2 x i32> zeroinitializer @@ -131,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 
4, e32, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %b) ret <4 x float> %v @@ -141,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x float> undef, float %b, i32 0 %splat = shufflevector <4 x float> %head, <4 x float> undef, <4 x i32> zeroinitializer @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %a, <8 x float> %b) ret <8 x float> %v @@ -165,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x float> undef, float %b, i32 0 %splat = shufflevector <8 x float> %head, <8 x float> undef, <8 x i32> zeroinitializer @@ -179,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x float> @llvm.maxnum.v16f32(<16 x float> %a, <16 x float> %b) ret <16 x float> %v @@ -189,6 +204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x float> undef, float %b, i32 0 %splat = shufflevector <16 x float> %head, <16 x float> undef, <16 x i32> zeroinitializer @@ -203,6 +219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %b) ret <2 x double> %v @@ -213,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x double> undef, double %b, i32 0 %splat = shufflevector <2 x double> %head, <2 x double> undef, <2 x i32> zeroinitializer @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %a, <4 x double> %b) ret <4 x double> %v @@ -237,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x double> undef, double %b, i32 0 %splat = shufflevector <4 x double> %head, <4 x double> undef, <4 x i32> zeroinitializer @@ -251,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %a, <8 x double> %b) ret <8 x double> %v @@ -261,6 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x double> undef, double %b, i32 0 %splat = shufflevector <8 x double> %head, <8 x double> undef, <8 x i32> zeroinitializer @@ -275,6 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli 
zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x double> @llvm.maxnum.v16f64(<16 x double> %a, <16 x double> %b) ret <16 x double> %v @@ -285,6 +308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x double> undef, double %b, i32 0 %splat = shufflevector <16 x double> %head, <16 x double> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b) ret <2 x half> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x half> undef, half %b, i32 0 %splat = shufflevector <2 x half> %head, <2 x half> undef, <2 x i32> zeroinitializer @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.minnum.v4f16(<4 x half> %a, <4 x half> %b) ret <4 x half> %v @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x half> undef, half %b, i32 0 %splat = shufflevector <4 x half> %head, <4 x half> undef, <4 x i32> zeroinitializer @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x half> @llvm.minnum.v8f16(<8 x half> %a, <8 x half> %b) ret <8 x half> %v @@ -69,6 +74,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x half> undef, half %b, i32 0 %splat = shufflevector <8 x half> %head, <8 x half> undef, <8 x i32> zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x half> @llvm.minnum.v16f16(<16 x half> %a, <16 x half> %b) ret <16 x half> %v @@ -93,6 +100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x half> undef, half %b, i32 0 %splat = shufflevector <16 x half> %head, <16 x half> undef, <16 x i32> zeroinitializer @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x float> @llvm.minnum.v2f32(<2 x float> %a, <2 x float> %b) ret <2 x float> %v @@ -117,6 +126,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement <2 x float> undef, float %b, i32 0 %splat = shufflevector <2 x float> %head, <2 x float> undef, <2 x i32> zeroinitializer @@ -131,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %b) ret <4 x float> %v @@ -141,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x float> undef, float %b, i32 0 %splat = shufflevector <4 x float> %head, <4 x float> undef, <4 x i32> zeroinitializer @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x float> @llvm.minnum.v8f32(<8 x float> %a, <8 x float> %b) ret <8 x float> %v @@ -165,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x float> undef, float %b, i32 0 %splat = shufflevector <8 x float> %head, <8 x float> undef, <8 x i32> zeroinitializer @@ -179,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x float> @llvm.minnum.v16f32(<16 x float> %a, <16 x float> %b) ret <16 x float> %v @@ -189,6 +204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x float> undef, float %b, i32 0 %splat = shufflevector <16 x float> %head, <16 x float> undef, <16 x i32> zeroinitializer @@ -203,6 +219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %b) ret <2 x double> %v @@ -213,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x double> undef, double %b, i32 0 %splat = shufflevector <2 x double> %head, <2 x double> undef, <2 x i32> zeroinitializer @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x double> @llvm.minnum.v4f64(<4 x double> %a, <4 x double> %b) ret <4 x double> %v @@ -237,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x double> undef, double %b, i32 0 %splat = shufflevector <4 x double> %head, <4 x double> undef, <4 x i32> zeroinitializer @@ -251,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %b) ret <8 x double> %v @@ -261,6 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head 
= insertelement <8 x double> undef, double %b, i32 0 %splat = shufflevector <8 x double> %head, <8 x double> undef, <8 x i32> zeroinitializer @@ -275,6 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x double> @llvm.minnum.v16f64(<16 x double> %a, <16 x double> %b) ret <16 x double> %v @@ -285,6 +308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x double> undef, double %b, i32 0 %splat = shufflevector <16 x double> %head, <16 x double> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) ret <2 x half> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -35,6 +37,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -63,6 +67,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <3 x half> @llvm.vp.fmul.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl) ret <3 x half> %v @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) ret <4 x half> %v @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -99,6 +106,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> undef, half %b, i32 0 %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -111,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, 
mf2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> undef, half %b, i32 0 %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -127,6 +136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) ret <8 x half> %v @@ -137,6 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -151,6 +162,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -163,6 +175,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -179,6 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) ret <16 x half> %v @@ -189,6 +203,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -203,6 +218,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -215,6 +231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -231,6 +248,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) ret <2 x float> %v @@ -241,6 +259,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -255,6 +274,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer @@ -267,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer @@ -283,6 +304,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) ret <4 x float> %v @@ -293,6 +315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -307,6 +330,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -319,6 +343,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -335,6 +360,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) ret <8 x float> %v @@ -345,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -359,6 +386,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer @@ -371,6 +399,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer @@ -387,6 +416,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) ret <16 x float> %v @@ -397,6 +427,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -411,6 +442,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -423,6 +455,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -439,6 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) ret <2 x double> %v @@ -449,6 +483,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -463,6 +498,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer @@ -475,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer @@ -491,6 +528,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) ret <4 x double> %v @@ -501,6 +539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -515,6 +554,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer @@ -527,6 +567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer @@ -543,6 +584,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) ret <8 x double> %v @@ -553,6 +595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -567,6 +610,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -579,6 +623,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -595,6 +640,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) ret <16 x double> %v @@ -605,6 +651,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -619,6 +666,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer @@ -631,6 +679,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -25,6 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -43,6 +45,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> 
undef, half %b, i32 0 %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -55,6 +58,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> undef, half %b, i32 0 %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -73,6 +77,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -85,6 +90,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -103,6 +109,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -115,6 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -133,6 +141,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer @@ -145,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer @@ -163,6 +173,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -175,6 +186,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -193,6 +205,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer @@ -205,6 +218,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfrdiv.vf 
v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer @@ -223,6 +237,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -235,6 +250,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -253,6 +269,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer @@ -265,6 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer @@ -283,6 +301,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer @@ -295,6 +314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer @@ -313,6 +333,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -325,6 +346,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -343,6 +365,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer @@ -355,6 +378,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x 
double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -25,6 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -43,6 +45,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> undef, half %b, i32 0 %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -55,6 +58,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> undef, half %b, i32 0 %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -73,6 +77,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -85,6 +90,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -103,6 +109,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -115,6 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -133,6 +141,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer @@ -145,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = 
shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer @@ -163,6 +173,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -175,6 +186,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -193,6 +205,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer @@ -205,6 +218,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer @@ -223,6 +237,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -235,6 +250,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -253,6 +269,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer @@ -265,6 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer @@ -283,6 +301,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer @@ -295,6 +314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer @@ -313,6 +333,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, 
mu ; CHECK-NEXT: vfsub.vv v8, v12, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -325,6 +346,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -343,6 +365,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer @@ -355,6 +378,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) ret <2 x half> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -35,6 +37,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> undef, half %b, i32 0 %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer @@ -63,6 +67,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <3 x half> @llvm.vp.fsub.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl) ret <3 x half> %v @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) ret <4 x half> %v @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x 
i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -99,6 +106,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> undef, half %b, i32 0 %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -111,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> undef, half %b, i32 0 %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer @@ -127,6 +136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) ret <8 x half> %v @@ -137,6 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -151,6 +162,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -163,6 +175,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> undef, half %b, i32 0 %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer @@ -179,6 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) ret <16 x half> %v @@ -189,6 +203,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -203,6 +218,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -215,6 +231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> undef, half %b, i32 0 %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer @@ -231,6 +248,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x 
float> %b, <2 x i1> %m, i32 %evl) ret <2 x float> %v @@ -241,6 +259,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -255,6 +274,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer @@ -267,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> undef, float %b, i32 0 %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer @@ -283,6 +304,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) ret <4 x float> %v @@ -293,6 +315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -307,6 +330,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -319,6 +343,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> undef, float %b, i32 0 %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer @@ -335,6 +360,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) ret <8 x float> %v @@ -345,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -359,6 +386,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer @@ -371,6 +399,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> undef, float %b, i32 0 %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x 
i32> zeroinitializer @@ -387,6 +416,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) ret <16 x float> %v @@ -397,6 +427,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -411,6 +442,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -423,6 +455,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> undef, float %b, i32 0 %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer @@ -439,6 +472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) ret <2 x double> %v @@ -449,6 +483,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -463,6 +498,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer @@ -475,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> undef, double %b, i32 0 %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer @@ -491,6 +528,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) ret <4 x double> %v @@ -501,6 +539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -515,6 +554,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer 
@@ -527,6 +567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> undef, double %b, i32 0 %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer @@ -543,6 +584,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) ret <8 x double> %v @@ -553,6 +595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -567,6 +610,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -579,6 +623,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> undef, double %b, i32 0 %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer @@ -595,6 +640,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) ret <16 x double> %v @@ -605,6 +651,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -619,6 +666,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer @@ -631,6 +679,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> undef, double %b, i32 0 %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.mul.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, 
v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.mul.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -73,6 +78,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.mul.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -123,6 +132,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.mul.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -133,6 +143,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -145,6 +156,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -157,6 +169,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -173,6 +186,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.mul.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -183,6 +197,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -195,6 +210,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -207,6 +223,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -223,6 +240,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.mul.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -233,6 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -245,6 +264,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -257,6 +277,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.mul.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -283,6 +305,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -295,6 +318,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -307,6 +331,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x 
i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -323,6 +348,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.mul.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -333,6 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -345,6 +372,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -357,6 +385,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -373,6 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 %evl) ret <12 x i16> %v @@ -383,6 +413,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <12 x i1> undef, i1 true, i32 0 %m = shufflevector <12 x i1> %head, <12 x i1> undef, <12 x i32> zeroinitializer @@ -395,6 +426,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <12 x i16> undef, i16 %b, i32 0 %vb = shufflevector <12 x i16> %elt.head, <12 x i16> undef, <12 x i32> zeroinitializer @@ -407,6 +439,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <12 x i16> undef, i16 %b, i32 0 %vb = shufflevector <12 x i16> %elt.head, <12 x i16> undef, <12 x i32> zeroinitializer @@ -423,6 +456,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.mul.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -433,6 +467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -445,6 +480,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -457,6 +493,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -473,6 +510,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.mul.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -483,6 +521,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -495,6 +534,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -507,6 +547,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -523,6 +564,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -533,6 +575,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -545,6 +588,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -557,6 +601,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -573,6 +618,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.mul.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -583,6 +629,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -595,6 +642,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> 
undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -607,6 +655,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -623,6 +672,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.mul.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -633,6 +683,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -645,6 +696,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -657,6 +709,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -673,6 +726,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.mul.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -683,6 +737,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -703,12 +758,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -729,12 +786,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -751,6 +810,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.mul.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 
%evl) ret <4 x i64> %v @@ -761,6 +821,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -781,12 +842,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -807,12 +870,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -829,6 +894,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.mul.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -839,6 +905,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -859,12 +926,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -885,12 +954,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -907,6 +978,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.mul.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -917,6 +989,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> 
undef, <16 x i32> zeroinitializer @@ -937,12 +1010,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -963,12 +1038,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %insert = insertelement <8 x i16> undef, i16 %y, i16 0 %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer @@ -20,6 +21,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %insert = insertelement <4 x i32> undef, i32 %y, i32 0 %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vnsra.wx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ; RV32-LABEL: vnsra_v2i64_v2i32_scalar: ; RV32: # %bb.0: @@ -66,6 +69,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = ashr <8 x i16> %x, %b = trunc <8 x i16> %a to <8 x i8> @@ -77,6 +81,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = ashr <4 x i32> %x, %b = trunc <4 x i32> %a to <4 x i16> @@ -88,6 +93,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = ashr <2 x i64> %x, %b = trunc <2 x i64> %a to <2 x i32> @@ -99,6 +105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %insert = insertelement <8 x i16> undef, i16 %y, i16 0 %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer @@ -112,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %insert = insertelement <4 x i32> undef, i32 %y, i32 0 %splat = shufflevector <4 x i32> %insert, <4 x i32> undef, <4 x i32> zeroinitializer @@ -125,6 +133,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ; RV32-LABEL: vnsrl_v2i64_v2i32_scalar: ; RV32: # %bb.0: @@ -158,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = lshr <8 x i16> %x, %b = trunc <8 x i16> %a to <8 x i8> @@ -169,6 +179,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = lshr <4 x i32> %x, %b = trunc <4 x i32> %a to <4 x i16> @@ -180,6 +191,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = lshr <2 x i64> %x, %b = trunc <2 x i64> %a to <2 x i32> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.or.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.or.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 5, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 5, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -99,6 +106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.or.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -109,6 +117,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -121,6 +130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -133,6 +143,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -147,6 +158,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 5, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 5, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -175,6 +188,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <7 x i8> @llvm.vp.or.v5i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl) ret <7 x i8> %v @@ -185,6 +199,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <7 x i1> undef, i1 true, i32 0 %m = shufflevector <7 x i1> %head, <7 x i1> undef, <7 x i32> zeroinitializer @@ -197,6 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <7 x i8> undef, i8 %b, i32 0 %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer @@ -209,6 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <7 x i8> undef, i8 %b, i32 0 %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer @@ -223,6 +240,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <7 x i8> undef, i8 5, i32 0 %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer @@ -235,6 +253,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <7 x i8> undef, i8 5, i32 0 %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer @@ -251,6 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> 
@llvm.vp.or.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -261,6 +281,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -285,6 +307,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -299,6 +322,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 5, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -311,6 +335,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 5, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -327,6 +352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.or.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -337,6 +363,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -349,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -361,6 +389,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -375,6 +404,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 5, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -387,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 5, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -403,6 +434,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, 
a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.or.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -413,6 +445,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -425,6 +458,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -437,6 +471,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -451,6 +486,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 5, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -463,6 +499,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 5, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -479,6 +516,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.or.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -489,6 +527,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -501,6 +540,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -513,6 +553,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -527,6 +568,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 5, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -539,6 +581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, 
i16 5, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -555,6 +598,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.or.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -565,6 +609,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -577,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -589,6 +635,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -603,6 +650,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 5, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -615,6 +663,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 5, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -631,6 +680,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.or.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -641,6 +691,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -653,6 +704,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -665,6 +717,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -679,6 +732,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 5, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -691,6 +745,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 5, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -707,6 +762,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.or.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -717,6 +773,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -729,6 +786,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -741,6 +799,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -755,6 +814,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 5, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -767,6 +827,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 5, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -783,6 +844,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -793,6 +855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -805,6 +868,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -817,6 +881,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -831,6 +896,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = 
insertelement <4 x i32> undef, i32 5, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -843,6 +909,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 5, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -859,6 +926,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -869,6 +937,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -881,6 +950,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -893,6 +963,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -907,6 +978,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 5, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -919,6 +991,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 5, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -935,6 +1008,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.or.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -945,6 +1019,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -957,6 +1032,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -969,6 +1045,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -983,6 
+1060,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 5, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -995,6 +1073,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 5, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -1011,6 +1090,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.or.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -1021,6 +1101,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -1041,12 +1122,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vor.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1067,12 +1150,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1087,6 +1172,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 5, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1099,6 +1185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 5, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1115,6 +1202,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.or.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -1125,6 +1213,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1145,12 +1234,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; 
RV32-NEXT: vor.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1171,12 +1262,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1191,6 +1284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 5, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1203,6 +1297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 5, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1219,6 +1314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.or.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -1229,6 +1325,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -1249,12 +1346,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vor.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1275,12 +1374,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1295,6 +1396,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 5, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1307,6 +1409,7 
@@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 5, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1323,6 +1426,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.or.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -1333,6 +1437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -1353,12 +1458,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1379,12 +1486,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1399,6 +1508,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 5, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1411,6 +1521,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 5, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -12,6 +12,7 @@ ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i8: @@ -19,6 +20,7 @@ ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -31,6 +33,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: 
vpgather_v2i8_sextload_v2i16: @@ -39,6 +42,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vsext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) %ev = sext <2 x i8> %v to <2 x i16> @@ -52,6 +56,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i8_zextload_v2i16: @@ -60,6 +65,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vzext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) %ev = zext <2 x i8> %v to <2 x i16> @@ -73,6 +79,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vsext.vf4 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i8_sextload_v2i32: @@ -81,6 +88,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vsext.vf4 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) %ev = sext <2 x i8> %v to <2 x i32> @@ -94,6 +102,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vzext.vf4 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i8_zextload_v2i32: @@ -102,6 +111,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vzext.vf4 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) %ev = zext <2 x i8> %v to <2 x i32> @@ -115,6 +125,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vsext.vf8 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i8_sextload_v2i64: @@ -123,6 +134,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vsext.vf8 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) %ev = sext <2 x i8> %v to <2 x i64> @@ -136,6 +148,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vzext.vf8 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i8_zextload_v2i64: @@ -144,6 +157,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vzext.vf8 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) %ev = zext <2 x i8> %v to <2 x i64> @@ -158,6 +172,7 @@ ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v4i8: @@ -165,6 +180,7 @@ ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: 
ret %v = call <4 x i8> @llvm.vp.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -176,6 +192,7 @@ ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4i8: @@ -183,6 +200,7 @@ ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -198,6 +216,7 @@ ; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v8i8: @@ -205,6 +224,7 @@ ; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x i8> @llvm.vp.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -217,6 +237,7 @@ ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i8: @@ -225,6 +246,7 @@ ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %idxs %v = call <8 x i8> @llvm.vp.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, <8 x i1> %m, i32 %evl) @@ -239,6 +261,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i16: @@ -246,6 +269,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -258,6 +282,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i16_sextload_v2i32: @@ -266,6 +291,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vsext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) %ev = sext <2 x i16> %v to <2 x i32> @@ -279,6 +305,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i16_zextload_v2i32: @@ -287,6 +314,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vzext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) %ev = zext <2 x i16> %v to <2 x i32> @@ -300,6 +328,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vsext.vf4 
v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i16_sextload_v2i64: @@ -308,6 +337,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vsext.vf4 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) %ev = sext <2 x i16> %v to <2 x i64> @@ -321,6 +351,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vzext.vf4 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i16_zextload_v2i64: @@ -329,6 +360,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vzext.vf4 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) %ev = zext <2 x i16> %v to <2 x i64> @@ -343,6 +375,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v4i16: @@ -350,6 +383,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x i16> @llvm.vp.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -361,6 +395,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4i16: @@ -368,6 +403,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -383,6 +419,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v8i16: @@ -390,6 +427,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -403,6 +441,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8i16: @@ -412,6 +451,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) @@ -426,6 +466,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8i16: @@ -435,6 +476,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu 
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs @@ -450,6 +492,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8i16: @@ -459,6 +502,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs @@ -474,6 +518,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i16: @@ -483,6 +528,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) @@ -496,6 +542,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i32: @@ -503,6 +550,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -515,6 +563,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i32_sextload_v2i64: @@ -523,6 +572,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vsext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) %ev = sext <2 x i32> %v to <2 x i64> @@ -536,6 +586,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i32_zextload_v2i64: @@ -544,6 +595,7 @@ ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vzext.vf2 v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) %ev = zext <2 x i32> %v to <2 x i64> @@ -557,6 +609,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v4i32: @@ -564,6 +617,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -574,6 +628,7 @@ ; RV32: # %bb.0: ; 
RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4i32: @@ -581,6 +636,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -595,6 +651,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v8i32: @@ -602,6 +659,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -615,6 +673,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8i32: @@ -624,6 +683,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) @@ -638,6 +698,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8i32: @@ -647,6 +708,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -662,6 +724,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8i32: @@ -671,6 +734,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -686,6 +750,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i16_v8i32: @@ -695,6 +760,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) @@ -709,6 +775,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_v8i16_v8i32: @@ -718,6 
+785,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -733,6 +801,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_v8i16_v8i32: @@ -742,6 +811,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -756,6 +826,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i32: @@ -765,6 +836,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) @@ -779,12 +851,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x i64> @llvm.vp.gather.v2i64.v2p0i64(<2 x i64*> %ptrs, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -798,12 +872,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x i64> @llvm.vp.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -815,12 +891,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -836,12 +914,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -855,6 +935,7 @@ ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; 
RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8i64: @@ -864,6 +945,7 @@ ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) @@ -878,6 +960,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -893,6 +976,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -908,6 +992,7 @@ ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i16_v8i64: @@ -917,6 +1002,7 @@ ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) @@ -931,6 +1017,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -946,6 +1033,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -960,6 +1048,7 @@ ; RV32-NEXT: vsll.vi v12, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i32_v8i64: @@ -969,6 +1058,7 @@ ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i32> %idxs %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) @@ -983,6 +1073,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -998,6 +1089,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1012,6 +1104,7 @@ ; 
CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs %v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1026,6 +1119,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2f16: @@ -1033,6 +1127,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x half> @llvm.vp.gather.v2f16.v2p0f16(<2 x half*> %ptrs, <2 x i1> %m, i32 %evl) ret <2 x half> %v @@ -1046,6 +1141,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v4f16: @@ -1053,6 +1149,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x half> @llvm.vp.gather.v4f16.v4p0f16(<4 x half*> %ptrs, <4 x i1> %m, i32 %evl) ret <4 x half> %v @@ -1064,6 +1161,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4f16: @@ -1071,6 +1169,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1086,6 +1185,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v8f16: @@ -1093,6 +1193,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x half> %v @@ -1106,6 +1207,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8f16: @@ -1115,6 +1217,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1129,6 +1232,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8f16: @@ -1138,6 +1242,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret 
%eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs @@ -1153,6 +1258,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8f16: @@ -1162,6 +1268,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs @@ -1177,6 +1284,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8f16: @@ -1186,6 +1294,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1199,6 +1308,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2f32: @@ -1206,6 +1316,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x float> @llvm.vp.gather.v2f32.v2p0f32(<2 x float*> %ptrs, <2 x i1> %m, i32 %evl) ret <2 x float> %v @@ -1218,6 +1329,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v4f32: @@ -1225,6 +1337,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x float> @llvm.vp.gather.v4f32.v4p0f32(<4 x float*> %ptrs, <4 x i1> %m, i32 %evl) ret <4 x float> %v @@ -1235,6 +1348,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4f32: @@ -1242,6 +1356,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1256,6 +1371,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v8f32: @@ -1263,6 +1379,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x float> %v @@ -1276,6 +1393,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: 
.cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8f32: @@ -1285,6 +1403,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1299,6 +1418,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8f32: @@ -1308,6 +1428,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1323,6 +1444,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8f32: @@ -1332,6 +1454,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1347,6 +1470,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i16_v8f32: @@ -1356,6 +1480,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1370,6 +1495,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_v8i16_v8f32: @@ -1379,6 +1505,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1394,6 +1521,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_v8i16_v8f32: @@ -1403,6 +1531,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1417,6 +1546,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8f32: @@ -1426,6 +1556,7 @@ ; RV64-NEXT: 
vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1440,12 +1571,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <2 x double> @llvm.vp.gather.v2f64.v2p0f64(<2 x double*> %ptrs, <2 x i1> %m, i32 %evl) ret <2 x double> %v @@ -1459,12 +1592,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <4 x double> @llvm.vp.gather.v4f64.v4p0f64(<4 x double*> %ptrs, <4 x i1> %m, i32 %evl) ret <4 x double> %v @@ -1476,12 +1611,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1497,12 +1634,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v8f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x double> %v @@ -1516,6 +1655,7 @@ ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8f64: @@ -1525,6 +1665,7 @@ ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i8> %idxs %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1539,6 +1680,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1554,6 +1696,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> %ptrs = 
getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1569,6 +1712,7 @@ ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i16_v8f64: @@ -1578,6 +1722,7 @@ ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i16> %idxs %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1592,6 +1737,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1607,6 +1753,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1621,6 +1768,7 @@ ; RV32-NEXT: vsll.vi v12, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_v8i32_v8f64: @@ -1630,6 +1778,7 @@ ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1644,6 +1793,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1659,6 +1809,7 @@ ; CHECK-NEXT: vsll.vi v8, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1673,6 +1824,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs %v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <2 x i8> @llvm.vp.load.v2i8(<2 x i8>* %ptr, <2 x i1> %m, i32 %evl) ret <2 x i8> %load @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <4 x i8> @llvm.vp.load.v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 %evl) ret <4 x i8> %load @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <8 x i8> @llvm.vp.load.v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 %evl) ret <8 x i8> %load @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <2 x i16> @llvm.vp.load.v2i16(<2 x i16>* %ptr, <2 x i1> %m, i32 %evl) ret <2 x i16> %load @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <4 x i16> @llvm.vp.load.v4i16(<4 x i16>* %ptr, <4 x i1> %m, i32 %evl) ret <4 x i16> %load @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <8 x i16> @llvm.vp.load.v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 %evl) ret <8 x i16> %load @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <2 x i32> @llvm.vp.load.v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 %evl) ret <2 x i32> %load @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <4 x i32> @llvm.vp.load.v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 %evl) ret <4 x i32> %load @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <8 x i32> @llvm.vp.load.v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 %evl) ret <8 x i32> %load @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <2 x i64> @llvm.vp.load.v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 %evl) ret <2 x i64> %load @@ -131,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <4 x i64> @llvm.vp.load.v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 %evl) ret <4 x i64> %load @@ -143,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <8 x i64> @llvm.vp.load.v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 %evl) ret <8 x i64> %load @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <2 x half> @llvm.vp.load.v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 %evl) ret <2 x half> %load @@ -167,6 +180,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <4 x half> @llvm.vp.load.v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 %evl) ret <4 x half> %load @@ -179,6 +193,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <8 x half> @llvm.vp.load.v8f16(<8 x half>* %ptr, <8 x i1> %m, i32 %evl) ret <8 x half> %load @@ -191,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <2 x float> @llvm.vp.load.v2f32(<2 x float>* %ptr, <2 x i1> %m, i32 %evl) ret <2 x float> %load @@ -203,6 +219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <4 x float> @llvm.vp.load.v4f32(<4 x float>* %ptr, <4 x i1> %m, i32 %evl) ret <4 x float> %load @@ -215,6 +232,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <8 x float> @llvm.vp.load.v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 %evl) ret <8 x float> %load @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <2 x double> @llvm.vp.load.v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 %evl) ret <2 x double> %load @@ -239,6 +258,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <4 x double> @llvm.vp.load.v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 %evl) ret <4 x double> %load @@ -251,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call <8 x double> @llvm.vp.load.v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 %evl) ret <8 x double> %load diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll @@ -11,12 +11,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) ret void @@ -29,6 +31,7 @@ ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8: @@ -37,6 +40,7 @@ ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i16> %val to <2 x i8> call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) @@ -52,6 +56,7 @@ ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8: @@ -62,6 +67,7 @@ ; 
RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i32> %val to <2 x i8> call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) @@ -79,6 +85,7 @@ ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8: @@ -91,6 +98,7 @@ ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i64> %val to <2 x i8> call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %tval, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) @@ -104,12 +112,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %m, i32 %evl) ret void @@ -120,12 +130,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -140,12 +152,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %m, i32 %evl) ret void @@ -158,6 +172,7 @@ ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8: @@ -166,6 +181,7 @@ ; RV64-NEXT: vsext.vf8 v12, v9 ; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %idxs call void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %m, i32 %evl) @@ -179,12 +195,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) ret void @@ -197,6 +215,7 @@ ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: 
vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16: @@ -205,6 +224,7 @@ ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i32> %val to <2 x i16> call void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16> %tval, <2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) @@ -220,6 +240,7 @@ ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16: @@ -230,6 +251,7 @@ ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i64> %val to <2 x i16> call void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16> %tval, <2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) @@ -243,12 +265,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v4i16.v4p0i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m, i32 %evl) ret void @@ -259,12 +283,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -279,12 +305,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) ret void @@ -298,6 +326,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8i16: @@ -307,6 +336,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) @@ -321,6 +351,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8i16: @@ -330,6 +361,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs @@ -345,6 +377,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8i16: @@ -354,6 +387,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs @@ -369,6 +403,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i16: @@ -378,6 +413,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) @@ -391,12 +427,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v2i32.v2p0i32(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) ret void @@ -409,6 +447,7 @@ ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32: @@ -417,6 +456,7 @@ ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc <2 x i64> %val to <2 x i32> call void @llvm.vp.scatter.v2i32.v2p0i32(<2 x i32> %tval, <2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) @@ -430,12 +470,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m, i32 %evl) ret void @@ -446,12 +488,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -466,12 +510,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 
; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) ret void @@ -485,6 +531,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8i32: @@ -494,6 +541,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) @@ -508,6 +556,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8i32: @@ -517,6 +566,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -532,6 +582,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8i32: @@ -541,6 +592,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -556,6 +608,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i16_v8i32: @@ -565,6 +618,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) @@ -579,6 +633,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i16_v8i32: @@ -588,6 +643,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -603,6 +659,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i16_v8i32: @@ -612,6 +669,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v 
v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -626,6 +684,7 @@ ; RV32-NEXT: vsll.vi v10, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i32: @@ -635,6 +694,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) @@ -648,12 +708,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v2i64.v2p0i64(<2 x i64> %val, <2 x i64*> %ptrs, <2 x i1> %m, i32 %evl) ret void @@ -666,12 +728,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v4i64.v4p0i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %m, i32 %evl) ret void @@ -682,12 +746,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -702,12 +768,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) ret void @@ -721,6 +789,7 @@ ; RV32-NEXT: vsll.vi v12, v14, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8i64: @@ -730,6 +799,7 @@ ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) @@ -744,6 +814,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
%eidxs = sext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -759,6 +830,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -774,6 +846,7 @@ ; RV32-NEXT: vsll.vi v12, v14, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i16_v8i64: @@ -783,6 +856,7 @@ ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) @@ -797,6 +871,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -812,6 +887,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -826,6 +902,7 @@ ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i32_v8i64: @@ -835,6 +912,7 @@ ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i32> %idxs call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) @@ -849,6 +927,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -864,6 +943,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -878,6 +958,7 @@ ; CHECK-NEXT: vsll.vi v12, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) @@ -891,12 +972,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t 
+; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v2f16.v2p0f16(<2 x half> %val, <2 x half*> %ptrs, <2 x i1> %m, i32 %evl) ret void @@ -909,12 +992,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v4f16.v4p0f16(<4 x half> %val, <4 x half*> %ptrs, <4 x i1> %m, i32 %evl) ret void @@ -925,12 +1010,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -945,12 +1032,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl) ret void @@ -964,6 +1053,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8f16: @@ -973,6 +1063,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl) @@ -987,6 +1078,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8f16: @@ -996,6 +1088,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs @@ -1011,6 +1104,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8f16: @@ -1020,6 +1114,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs @@ -1035,6 +1130,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: 
.cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8f16: @@ -1044,6 +1140,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1057,12 +1154,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v2f32.v2p0f32(<2 x float> %val, <2 x float*> %ptrs, <2 x i1> %m, i32 %evl) ret void @@ -1075,12 +1174,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v4f32.v4p0f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %m, i32 %evl) ret void @@ -1091,12 +1192,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1111,12 +1214,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) ret void @@ -1130,6 +1235,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8f32: @@ -1139,6 +1245,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1153,6 +1260,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8f32: @@ -1162,6 +1270,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = 
getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1177,6 +1286,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8f32: @@ -1186,6 +1296,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1201,6 +1312,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i16_v8f32: @@ -1210,6 +1322,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1224,6 +1337,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i16_v8f32: @@ -1233,6 +1347,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1248,6 +1363,7 @@ ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i16_v8f32: @@ -1257,6 +1373,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1271,6 +1388,7 @@ ; RV32-NEXT: vsll.vi v10, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8f32: @@ -1280,6 +1398,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1293,12 +1412,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, <2 x i1> %m, i32 %evl) ret void @@ -1311,12 +1432,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; 
RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v4f64.v4p0f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %m, i32 %evl) ret void @@ -1327,12 +1450,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1347,12 +1472,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) ret void @@ -1366,6 +1493,7 @@ ; RV32-NEXT: vsll.vi v12, v14, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8f64: @@ -1375,6 +1503,7 @@ ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i8> %idxs call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1389,6 +1518,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1404,6 +1534,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1419,6 +1550,7 @@ ; RV32-NEXT: vsll.vi v12, v14, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i16_v8f64: @@ -1428,6 +1560,7 @@ ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i16> %idxs call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1442,6 +1575,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr 
inbounds double, double* %base, <8 x i64> %eidxs @@ -1457,6 +1591,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1471,6 +1606,7 @@ ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i32_v8f64: @@ -1480,6 +1616,7 @@ ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1494,6 +1631,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1509,6 +1647,7 @@ ; CHECK-NEXT: vsll.vi v12, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1523,6 +1662,7 @@ ; CHECK-NEXT: vsll.vi v12, v12, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v2i8(<2 x i8> %val, <2 x i8>* %ptr, <2 x i1> %m, i32 %evl) ret void @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v4i8(<4 x i8> %val, <4 x i8>* %ptr, <4 x i1> %m, i32 %evl) ret void @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v8i8(<8 x i8> %val, <8 x i8>* %ptr, <8 x i1> %m, i32 %evl) ret void @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v2i16(<2 x i16> %val, <2 x i16>* %ptr, <2 x i1> %m, i32 %evl) ret void @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v4i16(<4 x i16> %val, <4 x i16>* 
%ptr, <4 x i1> %m, i32 %evl) ret void @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v8i16(<8 x i16> %val, <8 x i16>* %ptr, <8 x i1> %m, i32 %evl) ret void @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v2i32(<2 x i32> %val, <2 x i32>* %ptr, <2 x i1> %m, i32 %evl) ret void @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v4i32(<4 x i32> %val, <4 x i32>* %ptr, <4 x i1> %m, i32 %evl) ret void @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v8i32(<8 x i32> %val, <8 x i32>* %ptr, <8 x i1> %m, i32 %evl) ret void @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v2i64(<2 x i64> %val, <2 x i64>* %ptr, <2 x i1> %m, i32 %evl) ret void @@ -131,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v4i64(<4 x i64> %val, <4 x i64>* %ptr, <4 x i1> %m, i32 %evl) ret void @@ -143,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v8i64(<8 x i64> %val, <8 x i64>* %ptr, <8 x i1> %m, i32 %evl) ret void @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v2f16(<2 x half> %val, <2 x half>* %ptr, <2 x i1> %m, i32 %evl) ret void @@ -167,6 +180,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v4f16(<4 x half> %val, <4 x half>* %ptr, <4 x i1> %m, i32 %evl) ret void @@ -179,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v8f16(<8 x half> %val, <8 x half>* %ptr, <8 x i1> %m, i32 %evl) ret void @@ -191,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v2f32(<2 x float> %val, <2 x float>* %ptr, <2 x i1> %m, i32 %evl) ret void @@ -203,6 +219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v4f32(<4 x float> %val, <4 x float>* %ptr, <4 x i1> %m, i32 %evl) ret void @@ -215,6 +232,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v8f32(<8 x 
float> %val, <8 x float>* %ptr, <8 x i1> %m, i32 %evl) ret void @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v2f64(<2 x double> %val, <2 x double>* %ptr, <2 x i1> %m, i32 %evl) ret void @@ -239,6 +258,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v4f64(<4 x double> %val, <4 x double>* %ptr, <4 x i1> %m, i32 %evl) ret void @@ -251,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.v8f64(<8 x double> %val, <8 x double>* %ptr, <8 x i1> %m, i32 %evl) ret void @@ -261,6 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement <2 x i1> undef, i1 true, i32 0 %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll @@ -15,6 +15,7 @@ ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %v) ret i1 %red @@ -31,6 +32,7 @@ ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %v) ret i1 %red @@ -47,6 +49,7 @@ ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %v) ret i1 %red @@ -61,6 +64,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: snez a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.v2i1(<2 x i1> %v) ret i1 %red @@ -75,6 +79,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.v2i1(<2 x i1> %v) ret i1 %red @@ -90,6 +95,7 @@ ; CHECK-NEXT: vpopc.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.v2i1(<2 x i1> %v) ret i1 %red @@ -104,6 +110,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: snez a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> %v) ret i1 %red @@ -118,6 +125,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.v4i1(<4 x i1> %v) ret i1 %red @@ -133,6 +141,7 @@ ; CHECK-NEXT: vpopc.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> %v) ret i1 %red @@ -147,6 +156,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: 
snez a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> %v) ret i1 %red @@ -161,6 +171,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.v8i1(<8 x i1> %v) ret i1 %red @@ -176,6 +187,7 @@ ; CHECK-NEXT: vpopc.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> %v) ret i1 %red @@ -190,6 +202,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: snez a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> %v) ret i1 %red @@ -204,6 +217,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.v16i1(<16 x i1> %v) ret i1 %red @@ -219,6 +233,7 @@ ; CHECK-NEXT: vpopc.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.v16i1(<16 x i1> %v) ret i1 %red @@ -234,6 +249,7 @@ ; LMULMAX1-NEXT: vpopc.m a0, v8 ; LMULMAX1-NEXT: snez a0, a0 ; LMULMAX1-NEXT: neg a0, a0 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX8-LABEL: vreduce_or_v32i1: @@ -243,6 +259,7 @@ ; LMULMAX8-NEXT: vpopc.m a0, v0 ; LMULMAX8-NEXT: snez a0, a0 ; LMULMAX8-NEXT: neg a0, a0 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret %red = call i1 @llvm.vector.reduce.or.v32i1(<32 x i1> %v) ret i1 %red @@ -258,6 +275,7 @@ ; LMULMAX1-NEXT: vpopc.m a0, v8 ; LMULMAX1-NEXT: andi a0, a0, 1 ; LMULMAX1-NEXT: neg a0, a0 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX8-LABEL: vreduce_xor_v32i1: @@ -267,6 +285,7 @@ ; LMULMAX8-NEXT: vpopc.m a0, v0 ; LMULMAX8-NEXT: andi a0, a0, 1 ; LMULMAX8-NEXT: neg a0, a0 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.v32i1(<32 x i1> %v) ret i1 %red @@ -282,6 +301,7 @@ ; LMULMAX1-NEXT: vpopc.m a0, v8 ; LMULMAX1-NEXT: seqz a0, a0 ; LMULMAX1-NEXT: neg a0, a0 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX8-LABEL: vreduce_and_v32i1: @@ -292,6 +312,7 @@ ; LMULMAX8-NEXT: vpopc.m a0, v8 ; LMULMAX8-NEXT: seqz a0, a0 ; LMULMAX8-NEXT: neg a0, a0 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret %red = call i1 @llvm.vector.reduce.and.v32i1(<32 x i1> %v) ret i1 %red @@ -309,6 +330,7 @@ ; LMULMAX1-NEXT: vpopc.m a0, v8 ; LMULMAX1-NEXT: snez a0, a0 ; LMULMAX1-NEXT: neg a0, a0 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX8-LABEL: vreduce_or_v64i1: @@ -318,6 +340,7 @@ ; LMULMAX8-NEXT: vpopc.m a0, v0 ; LMULMAX8-NEXT: snez a0, a0 ; LMULMAX8-NEXT: neg a0, a0 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret %red = call i1 @llvm.vector.reduce.or.v64i1(<64 x i1> %v) ret i1 %red @@ -335,6 +358,7 @@ ; LMULMAX1-NEXT: vpopc.m a0, v8 ; LMULMAX1-NEXT: andi a0, a0, 1 ; LMULMAX1-NEXT: neg a0, a0 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX8-LABEL: vreduce_xor_v64i1: @@ -344,6 +368,7 @@ ; LMULMAX8-NEXT: vpopc.m a0, v0 ; LMULMAX8-NEXT: andi a0, a0, 1 ; LMULMAX8-NEXT: neg a0, a0 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.v64i1(<64 x i1> %v) ret i1 %red @@ 
-361,6 +386,7 @@ ; LMULMAX1-NEXT: vpopc.m a0, v8 ; LMULMAX1-NEXT: seqz a0, a0 ; LMULMAX1-NEXT: neg a0, a0 +; LMULMAX1-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX1-NEXT: ret ; ; LMULMAX8-LABEL: vreduce_and_v64i1: @@ -371,6 +397,7 @@ ; LMULMAX8-NEXT: vpopc.m a0, v8 ; LMULMAX8-NEXT: seqz a0, a0 ; LMULMAX8-NEXT: neg a0, a0 +; LMULMAX8-NEXT: .cfi_def_cfa_offset 0 ; LMULMAX8-NEXT: ret %red = call i1 @llvm.vector.reduce.and.v64i1(<64 x i1> %v) ret i1 %red diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.srem.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -28,6 +29,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -38,6 +40,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -50,6 +53,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -62,6 +66,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -78,6 +83,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.srem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -88,6 +94,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -100,6 +107,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -112,6 +120,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -128,6 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <6 x i8> @llvm.vp.srem.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl) ret <6 x i8> %v @@ -140,6 +150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.srem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -150,6 +161,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -162,6 +174,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -174,6 +187,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -190,6 +204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.srem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -200,6 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -212,6 +228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -224,6 +241,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -240,6 +258,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.srem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -250,6 +269,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -262,6 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -274,6 +295,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -290,6 +312,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.srem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -300,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -312,6 +336,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -324,6 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -340,6 +366,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.srem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -350,6 +377,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -362,6 +390,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -374,6 +403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -390,6 +420,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.srem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -400,6 +431,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -412,6 +444,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x 
i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -424,6 +457,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -440,6 +474,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.srem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -450,6 +485,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -462,6 +498,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -474,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -490,6 +528,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -500,6 +539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -512,6 +552,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -524,6 +565,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -540,6 +582,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -550,6 +593,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -562,6 +606,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, 
a1, e32, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -574,6 +619,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -590,6 +636,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.srem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -600,6 +647,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -612,6 +660,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -624,6 +673,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -640,6 +690,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.srem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -650,6 +701,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -670,12 +722,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -696,12 +750,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -718,6 +774,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, 
v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.srem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -728,6 +785,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -748,12 +806,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -774,12 +834,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -796,6 +858,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.srem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -806,6 +869,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -826,12 +890,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -852,12 +918,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -874,6 +942,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -884,6 +953,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, 
v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -904,12 +974,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -930,12 +1002,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll @@ -15,6 +15,7 @@ ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.urem.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -27,6 +28,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.urem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -37,6 +39,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -49,6 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -61,6 +65,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -77,6 +82,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.urem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -87,6 +93,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x 
i32> zeroinitializer @@ -99,6 +106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -111,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -127,6 +136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <6 x i8> @llvm.vp.urem.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl) ret <6 x i8> %v @@ -139,6 +149,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.urem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -149,6 +160,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -161,6 +173,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -173,6 +186,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -189,6 +203,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.urem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -199,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -211,6 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -223,6 +240,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -239,6 +257,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.urem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -249,6 +268,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -261,6 +281,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -289,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.urem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -299,6 +322,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -311,6 +335,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -323,6 +348,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -339,6 +365,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.urem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -349,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -361,6 +389,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -373,6 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -389,6 
+419,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.urem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -399,6 +430,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -411,6 +443,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -423,6 +456,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -439,6 +473,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.urem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -449,6 +484,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -461,6 +497,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -473,6 +510,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -489,6 +527,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -499,6 +538,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -511,6 +551,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -523,6 +564,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 
; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -539,6 +581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -549,6 +592,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -561,6 +605,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -573,6 +618,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -589,6 +635,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.urem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -599,6 +646,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -611,6 +659,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -623,6 +672,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -639,6 +689,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.urem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -649,6 +700,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -669,12 +721,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vremu.vx v8, 
v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -695,12 +749,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -717,6 +773,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.urem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -727,6 +784,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -747,12 +805,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -773,12 +833,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -795,6 +857,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.urem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -805,6 +868,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -825,12 +889,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -851,12 +917,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; 
RV32-NEXT: vremu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -873,6 +941,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -883,6 +952,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -903,12 +973,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -929,12 +1001,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -37,6 +39,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 2, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -49,6 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 2, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x 
i8> undef, <2 x i32> zeroinitializer @@ -65,6 +69,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -77,6 +82,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -91,6 +97,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 2, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -103,6 +110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 2, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -119,6 +127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -131,6 +140,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -145,6 +155,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 2, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -157,6 +168,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 2, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -173,6 +185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -185,6 +198,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -199,6 +213,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 2, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -211,6 +226,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 2, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -227,6 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -239,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -253,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 2, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -265,6 +284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 2, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -281,6 +301,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -293,6 +314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -307,6 +329,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 2, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -319,6 +342,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 2, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -335,6 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -347,6 +372,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -361,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: 
vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 2, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -373,6 +400,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 2, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -389,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -401,6 +430,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -415,6 +445,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 2, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -427,6 +458,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 2, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -443,6 +475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -455,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -469,6 +503,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 2, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -481,6 +516,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 2, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -497,6 +533,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -509,6 +546,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -523,6 +561,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 2, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -535,6 +574,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 2, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -551,6 +591,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -563,6 +604,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -577,6 +619,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 2, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -589,6 +632,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 2, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -605,6 +649,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -617,6 +662,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -631,6 +677,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 2, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -643,6 +690,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 2, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -667,12 +715,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v9, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; 
RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -693,12 +743,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v9, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -713,6 +765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 2, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -725,6 +778,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 2, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -749,12 +803,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v10, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -775,12 +831,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v10, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -795,6 +853,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 2, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -807,6 +866,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 2, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -831,12 +891,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v12, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = 
insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -857,12 +919,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v12, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -877,6 +941,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 2, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -889,6 +954,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 2, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -913,12 +979,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v16, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -939,12 +1007,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -959,6 +1029,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 2, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -971,6 +1042,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 2, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b) ret <2 x i8> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: 
vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 5, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b) ret <4 x i8> %v @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -69,6 +74,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 5, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b) ret <8 x i8> %v @@ -93,6 +100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -105,6 +113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 5, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b) ret <16 x i8> %v @@ -129,6 +139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -141,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 5, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b) ret <2 x i16> %v @@ -165,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -177,6 +191,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 5, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -191,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b) ret <4 x i16> %v @@ -201,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -213,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 5, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b) ret <8 x i16> %v @@ -237,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -249,6 +269,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 5, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -263,6 +284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b) ret <16 x i16> %v @@ -273,6 +295,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -285,6 +308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 5, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -299,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b) ret <2 x i32> %v @@ -309,6 +334,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu 
; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -321,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 5, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -335,6 +362,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b) ret <4 x i32> %v @@ -345,6 +373,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -357,6 +386,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 5, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -371,6 +401,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b) ret <8 x i32> %v @@ -381,6 +412,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -393,6 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 5, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -407,6 +440,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b) ret <16 x i32> %v @@ -417,6 +451,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -429,6 +464,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 5, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -443,6 +479,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b) ret <2 x i64> %v @@ -460,12 +497,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), 
zero ; RV32-NEXT: vsadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: sadd_v2i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vsadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -478,6 +517,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 5, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -492,6 +532,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b) ret <4 x i64> %v @@ -509,12 +550,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: sadd_v4i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vsadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -527,6 +570,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 5, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -541,6 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b) ret <8 x i64> %v @@ -558,12 +603,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: sadd_v8i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -576,6 +623,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 5, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -590,6 +638,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b) ret <16 x i64> %v @@ -607,12 +656,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: sadd_v16i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vsadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = 
insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -625,6 +676,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 5, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b) ret <2 x i8> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 8, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b) ret <4 x i8> %v @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -69,6 +74,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 8, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b) ret <8 x i8> %v @@ -93,6 +100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -105,6 +113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 8, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %va, <16 
x i8> %b) ret <16 x i8> %v @@ -129,6 +139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -141,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 8, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b) ret <2 x i16> %v @@ -165,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -177,6 +191,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 8, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -191,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b) ret <4 x i16> %v @@ -201,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -213,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 8, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b) ret <8 x i16> %v @@ -237,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -249,6 +269,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 8, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -263,6 +284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> 
@llvm.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b) ret <16 x i16> %v @@ -273,6 +295,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -285,6 +308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 8, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -299,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b) ret <2 x i32> %v @@ -309,6 +334,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -321,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 8, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -335,6 +362,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b) ret <4 x i32> %v @@ -345,6 +373,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -357,6 +386,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 8, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -371,6 +401,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b) ret <8 x i32> %v @@ -381,6 +412,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -393,6 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 8, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -407,6 +440,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v12 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b) ret <16 x i32> %v @@ -417,6 +451,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -429,6 +464,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 8, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -443,6 +479,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b) ret <2 x i64> %v @@ -460,12 +497,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uadd_v2i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vsaddu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -478,6 +517,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 8, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -492,6 +532,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b) ret <4 x i64> %v @@ -509,12 +550,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uadd_v4i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vsaddu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -527,6 +570,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 8, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -541,6 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b) ret <8 x i64> %v @@ -558,12 +603,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uadd_v8i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsaddu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret 
%elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -576,6 +623,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 8, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -590,6 +638,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b) ret <16 x i64> %v @@ -607,12 +656,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uadd_v16i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vsaddu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -625,6 +676,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 8, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: vse32.v v8, (a3) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %va = load <8 x i32>, <8 x i32>* %a %vb = load <8 x i32>, <8 x i32>* %b @@ -28,6 +29,7 @@ ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: vse32.v v8, (a3) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = load <8 x i32>, <8 x i32>* %b %ahead = insertelement <8 x i32> undef, i32 %a, i32 0 @@ -46,6 +48,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vse32.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = load <8 x i32>, <8 x i32>* %b %a = insertelement <8 x i32> undef, i32 -1, i32 0 @@ -65,6 +68,7 @@ ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: vse32.v v8, (a3) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %va = load <8 x float>, <8 x float>* %a %vb = load <8 x float>, <8 x float>* %b @@ -82,6 +86,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: vse32.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = load <8 x float>, <8 x float>* %b %ahead = insertelement <8 x float> undef, float %a, i32 0 @@ -100,6 +105,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: vse32.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = load <8 x float>, <8 x float>* %b %a = insertelement <8 x float> undef, float 0.0, i32 0 @@ -119,6 +125,7 @@ ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: vse16.v v8, (a3) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %va = 
load <16 x i16>, <16 x i16>* %a %vb = load <16 x i16>, <16 x i16>* %b @@ -136,6 +143,7 @@ ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: vse16.v v8, (a3) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = load <16 x i16>, <16 x i16>* %b %ahead = insertelement <16 x i16> undef, i16 %a, i32 0 @@ -154,6 +162,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmerge.vim v8, v8, 4, v0 ; CHECK-NEXT: vse16.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = load <16 x i16>, <16 x i16>* %b %a = insertelement <16 x i16> undef, i16 4, i32 0 @@ -174,6 +183,7 @@ ; CHECK-NEXT: vle16.v v12, (a1) ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: vse16.v v8, (a3) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %va = load <32 x half>, <32 x half>* %a %vb = load <32 x half>, <32 x half>* %b @@ -192,6 +202,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: vse16.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = load <32 x half>, <32 x half>* %b %ahead = insertelement <32 x half> undef, half %a, i32 0 @@ -211,6 +222,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: vse16.v v8, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = load <32 x half>, <32 x half>* %b %a = insertelement <32 x half> undef, half 0.0, i32 0 @@ -228,6 +240,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select <2 x i1> %cc, <2 x i1> %a, <2 x i1> %b ret <2 x i1> %v @@ -240,6 +253,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select <4 x i1> %cc, <4 x i1> %a, <4 x i1> %b ret <4 x i1> %v @@ -252,6 +266,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select <8 x i1> %cc, <8 x i1> %a, <8 x i1> %b ret <8 x i1> %v @@ -264,6 +279,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select <16 x i1> %cc, <16 x i1> %a, <16 x i1> %b ret <16 x i1> %v @@ -277,6 +293,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select <32 x i1> %cc, <32 x i1> %a, <32 x i1> %b ret <32 x i1> %v @@ -290,6 +307,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select <64 x i1> %cc, <64 x i1> %a, <64 x i1> %b ret <64 x i1> %v diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.shl.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -26,6 +27,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.shl.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -36,6 +38,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -48,6 +51,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -60,6 +64,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -74,6 +79,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 3, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -86,6 +92,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 3, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -102,6 +109,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <3 x i8> @llvm.vp.shl.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl) ret <3 x i8> %v @@ -114,6 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.shl.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -124,6 +133,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -136,6 +146,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -148,6 +159,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -162,6 +174,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 3, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -174,6 +187,7 @@ ; CHECK: # %bb.0: 
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 3, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -190,6 +204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.shl.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -200,6 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -212,6 +228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -224,6 +241,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -238,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 3, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -250,6 +269,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 3, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -266,6 +286,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.shl.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -276,6 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -288,6 +310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -300,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -314,6 +338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> 
undef, i8 3, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -326,6 +351,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 3, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -342,6 +368,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.shl.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -352,6 +379,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -364,6 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -376,6 +405,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -390,6 +420,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 3, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -402,6 +433,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 3, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -418,6 +450,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.shl.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -428,6 +461,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -440,6 +474,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -452,6 +487,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -466,6 +502,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 3, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -478,6 +515,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 3, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -494,6 +532,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.shl.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -504,6 +543,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -516,6 +556,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -528,6 +569,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -542,6 +584,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 3, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -554,6 +597,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 3, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -570,6 +614,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.shl.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -580,6 +625,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -592,6 +638,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -604,6 +651,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -618,6 +666,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 3, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -630,6 +679,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 3, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -646,6 +696,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.shl.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -656,6 +707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -668,6 +720,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -680,6 +733,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -694,6 +748,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 3, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -706,6 +761,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 3, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -722,6 +778,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -732,6 +789,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -744,6 +802,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> 
zeroinitializer @@ -756,6 +815,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -770,6 +830,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 3, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -782,6 +843,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 3, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -798,6 +860,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.shl.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -808,6 +871,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -820,6 +884,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -832,6 +897,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -846,6 +912,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 3, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -858,6 +925,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 3, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -874,6 +942,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.shl.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -884,6 +953,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -896,6 +966,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -908,6 +979,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -922,6 +994,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 3, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -934,6 +1007,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 3, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -950,6 +1024,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.shl.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -960,6 +1035,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -972,12 +1048,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -990,12 +1068,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1010,6 +1090,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 3, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1022,6 +1103,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 3, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1038,6 +1120,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.shl.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -1048,6 +1131,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1060,12 +1144,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1078,12 +1164,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1098,6 +1186,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 3, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1110,6 +1199,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 3, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1126,6 +1216,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.shl.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -1136,6 +1227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -1148,12 +1240,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1166,12 +1260,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; 
RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1186,6 +1282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 3, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1198,6 +1295,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 3, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1214,6 +1312,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.shl.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -1224,6 +1323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -1236,12 +1336,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1254,12 +1356,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1274,6 +1378,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 3, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1286,6 +1391,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 3, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7> %va, <8 x i7> 
%b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -28,6 +29,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -38,6 +40,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -50,6 +53,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -62,6 +66,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -76,6 +81,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 5, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -88,6 +94,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 5, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -104,6 +111,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -114,6 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -126,6 +135,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -138,6 +148,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -152,6 +163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 5, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -164,6 +176,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 5, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -180,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl) ret <7 x i8> %v @@ -192,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -202,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -214,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -226,6 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -240,6 +258,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 5, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -252,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 5, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -268,6 +288,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -278,6 +299,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -290,6 +312,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -302,6 +325,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -316,6 +340,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 5, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -328,6 +353,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 5, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -344,6 +370,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -354,6 +381,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -366,6 +394,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -378,6 +407,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -392,6 +422,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 5, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -404,6 +435,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 5, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -420,6 +452,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -430,6 +463,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -442,6 +476,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -454,6 +489,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
%elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -468,6 +504,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 5, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -480,6 +517,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 5, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -496,6 +534,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -506,6 +545,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -518,6 +558,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -530,6 +571,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -544,6 +586,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 5, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -556,6 +599,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 5, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -572,6 +616,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -582,6 +627,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -594,6 +640,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> 
zeroinitializer @@ -606,6 +653,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -620,6 +668,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 5, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -632,6 +681,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 5, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -648,6 +698,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -658,6 +709,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -670,6 +722,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -682,6 +735,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -696,6 +750,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 5, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -708,6 +763,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 5, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -724,6 +780,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -734,6 +791,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -746,6 +804,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, 
v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -758,6 +817,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -772,6 +832,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 5, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -784,6 +845,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 5, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -800,6 +862,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -810,6 +873,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -822,6 +886,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -834,6 +899,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -848,6 +914,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 5, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -860,6 +927,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 5, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -876,6 +944,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -886,6 +955,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector 
<16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -898,6 +968,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -910,6 +981,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -924,6 +996,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 5, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -936,6 +1009,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 5, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -952,6 +1026,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -962,6 +1037,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -974,12 +1050,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -992,12 +1070,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1012,6 +1092,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 5, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1024,6 +1105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 5, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> 
undef, <2 x i32> zeroinitializer @@ -1040,6 +1122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -1050,6 +1133,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1062,12 +1146,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1080,12 +1166,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1100,6 +1188,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 5, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1112,6 +1201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 5, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1128,6 +1218,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -1138,6 +1229,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -1150,12 +1242,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1168,12 +1262,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret 
; ; RV64-LABEL: vsra_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1188,6 +1284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 5, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1200,6 +1297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 5, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1216,6 +1314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -1226,6 +1325,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -1238,12 +1338,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1256,12 +1358,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1276,6 +1380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 5, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1288,6 +1393,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 5, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll @@ -15,6 +15,7 @@ ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vsetvli 
zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.lshr.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -27,6 +28,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -37,6 +39,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -49,6 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -61,6 +65,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 4, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -87,6 +93,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 4, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -103,6 +110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -113,6 +121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -125,6 +134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -137,6 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -151,6 +162,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 4, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> 
undef, <4 x i32> zeroinitializer @@ -163,6 +175,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 4, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -179,6 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <7 x i8> @llvm.vp.lshr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl) ret <7 x i8> %v @@ -191,6 +205,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -201,6 +216,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -213,6 +229,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -225,6 +242,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -239,6 +257,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 4, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -251,6 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 4, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -267,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -277,6 +298,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -289,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -301,6 +324,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
%elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -315,6 +339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 4, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -327,6 +352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 4, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -343,6 +369,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -353,6 +380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -365,6 +393,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -377,6 +406,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -391,6 +421,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 4, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -403,6 +434,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 4, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -419,6 +451,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -429,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -441,6 +475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer 
@@ -453,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -467,6 +503,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 4, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -479,6 +516,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 4, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -495,6 +533,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -505,6 +544,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -517,6 +557,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -529,6 +570,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -543,6 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 4, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -555,6 +598,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 4, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -571,6 +615,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -581,6 +626,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -593,6 +639,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -605,6 +652,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -619,6 +667,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 4, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -631,6 +680,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 4, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -647,6 +697,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -657,6 +708,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -669,6 +721,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -681,6 +734,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -695,6 +749,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 4, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -707,6 +762,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 4, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -723,6 +779,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -733,6 +790,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = 
shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -745,6 +803,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -757,6 +816,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -771,6 +831,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 4, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -783,6 +844,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 4, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -799,6 +861,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -809,6 +872,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -821,6 +885,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -833,6 +898,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -847,6 +913,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 4, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -859,6 +926,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 4, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -875,6 +943,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -885,6 +954,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -897,6 +967,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -909,6 +980,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -923,6 +995,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 4, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -935,6 +1008,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 4, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -951,6 +1025,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -961,6 +1036,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -973,12 +1049,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -991,12 +1069,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1011,6 +1091,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 4, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1023,6 +1104,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; 
CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 4, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1039,6 +1121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -1049,6 +1132,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1061,12 +1145,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1079,12 +1165,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1099,6 +1187,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 4, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1111,6 +1200,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 4, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1127,6 +1217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -1137,6 +1228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -1149,12 +1241,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x 
i32> zeroinitializer @@ -1167,12 +1261,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1187,6 +1283,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 4, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1199,6 +1296,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 4, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1215,6 +1313,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -1225,6 +1324,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -1237,12 +1337,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1255,12 +1357,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1275,6 +1379,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 4, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1287,6 +1392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 4, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %b) ret <2 x i8> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -34,6 +36,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 1, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -48,6 +51,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %b) ret <4 x i8> %v @@ -58,6 +62,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -71,6 +76,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 1, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %b) ret <8 x i8> %v @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -108,6 +116,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 1, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -122,6 +131,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %b) ret <16 x i8> %v @@ -132,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -145,6 +156,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = 
insertelement <16 x i8> undef, i8 1, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %b) ret <2 x i16> %v @@ -169,6 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -182,6 +196,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 1, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -196,6 +211,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %b) ret <4 x i16> %v @@ -206,6 +222,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -219,6 +236,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 1, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -233,6 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %b) ret <8 x i16> %v @@ -243,6 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -256,6 +276,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 1, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -270,6 +291,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %b) ret <16 x i16> %v @@ -280,6 +302,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -293,6 +316,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; 
CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 1, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -307,6 +331,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %b) ret <2 x i32> %v @@ -317,6 +342,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -330,6 +356,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 1, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -344,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %b) ret <4 x i32> %v @@ -354,6 +382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -367,6 +396,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 1, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -381,6 +411,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %b) ret <8 x i32> %v @@ -391,6 +422,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -404,6 +436,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 1, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -418,6 +451,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %b) ret <16 x i32> %v @@ -428,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -441,6 
+476,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 1, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -455,6 +491,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %b) ret <2 x i64> %v @@ -472,12 +509,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssub_v2i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vssub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -491,6 +530,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 1, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -505,6 +545,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %b) ret <4 x i64> %v @@ -522,12 +563,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssub_v4i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vssub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -541,6 +584,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 1, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -555,6 +599,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %b) ret <8 x i64> %v @@ -572,12 +617,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssub_v8i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vssub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -591,6 +638,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 1, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> 
undef, <8 x i32> zeroinitializer @@ -605,6 +653,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %b) ret <16 x i64> %v @@ -622,12 +671,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssub_v16i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vssub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -641,6 +692,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 1, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %b) ret <2 x i8> %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -34,6 +36,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 2, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -48,6 +51,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %b) ret <4 x i8> %v @@ -58,6 +62,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -71,6 +76,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 2, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %b) ret <8 x i8> %v @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -108,6 +116,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 2, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -122,6 +131,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %b) ret <16 x i8> %v @@ -132,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -145,6 +156,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 2, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %b) ret <2 x i16> %v @@ -169,6 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -182,6 +196,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 2, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -196,6 +211,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %b) ret <4 x i16> %v @@ -206,6 +222,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -219,6 +236,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 2, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -233,6 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %b) ret <8 x i16> %v @@ -243,6 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -256,6 +276,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 2, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -270,6 +291,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %b) ret <16 x i16> %v @@ -280,6 +302,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -293,6 +316,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 2, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -307,6 +331,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %b) ret <2 x i32> %v @@ -317,6 +342,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -330,6 +356,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 2, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -344,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %b) ret <4 x i32> %v @@ -354,6 +382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -367,6 +396,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 2, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -381,6 +411,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> 
@llvm.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %b) ret <8 x i32> %v @@ -391,6 +422,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -404,6 +436,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 2, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -418,6 +451,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %b) ret <16 x i32> %v @@ -428,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -441,6 +476,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 2, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -455,6 +491,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %b) ret <2 x i64> %v @@ -472,12 +509,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usub_v2i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vssubu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -491,6 +530,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 2, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -505,6 +545,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %b) ret <4 x i64> %v @@ -522,12 +563,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usub_v4i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vssubu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -541,6 +584,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; 
CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 2, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -555,6 +599,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %b) ret <8 x i64> %v @@ -572,12 +617,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usub_v8i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vssubu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -591,6 +638,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 2, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -605,6 +653,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %b) ret <16 x i64> %v @@ -622,12 +671,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usub_v16i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vssubu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -641,6 +692,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 2, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.sub.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> 
undef, <2 x i32> zeroinitializer @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -73,6 +78,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl) ret <3 x i8> %v @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <3 x i1> undef, i1 true, i32 0 %m = shufflevector <3 x i1> %head, <3 x i1> undef, <3 x i32> zeroinitializer @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <3 x i8> undef, i8 %b, i32 0 %vb = shufflevector <3 x i8> %elt.head, <3 x i8> undef, <3 x i32> zeroinitializer @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <3 x i8> undef, i8 %b, i32 0 %vb = shufflevector <3 x i8> %elt.head, <3 x i8> undef, <3 x i32> zeroinitializer @@ -123,6 +132,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -133,6 +143,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -145,6 +156,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -157,6 +169,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -173,6 +186,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -183,6 +197,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -195,6 +210,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -207,6 +223,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -223,6 +240,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -233,6 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -245,6 +264,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -257,6 +277,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -283,6 +305,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -295,6 +318,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -307,6 +331,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -323,6 +348,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -333,6 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -345,6 +372,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -357,6 +385,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -373,6 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -383,6 +413,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -395,6 +426,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -407,6 +439,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -423,6 +456,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -433,6 +467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -445,6 +480,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -457,6 +493,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -473,6 +510,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = 
call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -483,6 +521,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -495,6 +534,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -507,6 +547,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -523,6 +564,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -533,6 +575,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -545,6 +588,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -557,6 +601,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -573,6 +618,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -583,6 +629,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -595,6 +642,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -607,6 +655,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -623,6 +672,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -633,6 +683,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -645,6 +696,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -657,6 +709,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -673,6 +726,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -683,6 +737,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -703,12 +758,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -729,12 +786,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -751,6 +810,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -761,6 +821,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -781,12 +842,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, 
sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -807,12 +870,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -829,6 +894,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -839,6 +905,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -859,12 +926,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -885,12 +954,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -907,6 +978,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -917,6 +989,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -937,12 +1010,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, 
i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -963,12 +1038,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll @@ -9,6 +9,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmacc.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i8>, <2 x i8>* %x %b = load <2 x i8>, <2 x i8>* %y @@ -26,6 +27,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmacc.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %b = load <4 x i8>, <4 x i8>* %y @@ -43,6 +45,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmacc.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i16>, <2 x i16>* %x %b = load <2 x i16>, <2 x i16>* %y @@ -60,6 +63,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmacc.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load <8 x i8>, <8 x i8>* %y @@ -77,6 +81,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmacc.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = load <4 x i16>, <4 x i16>* %y @@ -94,6 +99,7 @@ ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vwmacc.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = load <2 x i32>, <2 x i32>* %y @@ -111,6 +117,7 @@ ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vle8.v v11, (a1) ; CHECK-NEXT: vwmacc.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -128,6 +135,7 @@ ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vwmacc.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -145,6 +153,7 @@ ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vwmacc.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -163,6 +172,7 @@ ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) ; CHECK-NEXT: vwmacc.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -180,6 +190,7 @@ ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vwmacc.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -197,6 +208,7 @@ ; CHECK-NEXT: 
vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vwmacc.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -215,6 +227,7 @@ ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) ; CHECK-NEXT: vwmacc.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = load <64 x i8>, <64 x i8>* %y @@ -233,6 +246,7 @@ ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vwmacc.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i16>, <32 x i16>* %x %b = load <32 x i16>, <32 x i16>* %y @@ -250,6 +264,7 @@ ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vwmacc.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i32>, <16 x i32>* %x %b = load <16 x i32>, <16 x i32>* %y @@ -266,6 +281,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i8>, <2 x i8>* %x %b = insertelement <2 x i8> undef, i8 %y, i32 0 @@ -283,6 +299,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %b = insertelement <4 x i8> undef, i8 %y, i32 0 @@ -300,6 +317,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i16>, <2 x i16>* %x %b = insertelement <2 x i16> undef, i16 %y, i32 0 @@ -317,6 +335,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -334,6 +353,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = insertelement <4 x i16> undef, i16 %y, i32 0 @@ -351,6 +371,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = insertelement <2 x i32> undef, i32 %y, i64 0 @@ -368,6 +389,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -385,6 +407,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -402,6 +425,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i64 0 @@ -420,6 +444,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v12 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -437,6 +462,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = insertelement <16 x i16> undef, i16 %y, i32 0 @@ -454,6 +480,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = insertelement <8 x i32> undef, i32 %y, i64 0 @@ -472,6 +499,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -490,6 +518,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i16>, <32 x i16>* %x %b = insertelement <32 x i16> undef, i16 %y, i32 0 @@ -507,6 +536,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i32>, <16 x i32>* %x %b = insertelement <16 x i32> undef, i32 %y, i64 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll @@ -9,6 +9,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i8>, <2 x i8>* %x %b = load <2 x i8>, <2 x i8>* %y @@ -26,6 +27,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %b = load <4 x i8>, <4 x i8>* %y @@ -43,6 +45,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i16>, <2 x i16>* %x %b = load <2 x i16>, <2 x i16>* %y @@ -60,6 +63,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load <8 x i8>, <8 x i8>* %y @@ -77,6 +81,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = load <4 x i16>, <4 x i16>* %y @@ -94,6 +99,7 @@ ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = load <2 x i32>, <2 x i32>* %y @@ -111,6 +117,7 @@ ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vle8.v v11, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -128,6 +135,7 @@ ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vwmaccu.vv 
v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -145,6 +153,7 @@ ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -163,6 +172,7 @@ ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -180,6 +190,7 @@ ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -197,6 +208,7 @@ ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -215,6 +227,7 @@ ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = load <64 x i8>, <64 x i8>* %y @@ -233,6 +246,7 @@ ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i16>, <32 x i16>* %x %b = load <32 x i16>, <32 x i16>* %y @@ -250,6 +264,7 @@ ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vwmaccu.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i32>, <16 x i32>* %x %b = load <16 x i32>, <16 x i32>* %y @@ -266,6 +281,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i8>, <2 x i8>* %x %b = insertelement <2 x i8> undef, i8 %y, i32 0 @@ -283,6 +299,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %b = insertelement <4 x i8> undef, i8 %y, i32 0 @@ -300,6 +317,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i16>, <2 x i16>* %x %b = insertelement <2 x i16> undef, i16 %y, i32 0 @@ -317,6 +335,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -334,6 +353,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = insertelement <4 x i16> undef, i16 %y, i32 0 @@ -351,6 +371,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = insertelement <2 x i32> undef, i32 %y, i64 0 @@ -368,6 +389,7 @@ ; CHECK-NEXT: 
vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -385,6 +407,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -402,6 +425,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i64 0 @@ -420,6 +444,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -437,6 +462,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = insertelement <16 x i16> undef, i16 %y, i32 0 @@ -454,6 +480,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = insertelement <8 x i32> undef, i32 %y, i64 0 @@ -472,6 +499,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -490,6 +518,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i16>, <32 x i16>* %x %b = insertelement <32 x i16> undef, i16 %y, i32 0 @@ -507,6 +536,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vwmaccu.vx v8, a1, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i32>, <16 x i32>* %x %b = insertelement <16 x i32> undef, i32 %y, i64 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll @@ -9,6 +9,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmul.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i8>, <2 x i8>* %x %b = load <2 x i8>, <2 x i8>* %y @@ -25,6 +26,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmul.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %b = load <4 x i8>, <4 x i8>* %y @@ -41,6 +43,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmul.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i16>, <2 x i16>* %x %b = load <2 x i16>, <2 x i16>* %y @@ -57,6 +60,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmul.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load <8 x i8>, <8 x i8>* %y @@ -73,6 +77,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmul.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = load <4 x i16>, <4 x i16>* %y @@ -89,6 +94,7 @@ ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vwmul.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = load <2 x i32>, <2 x i32>* %y @@ -105,6 +111,7 @@ ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vle8.v v11, (a1) ; CHECK-NEXT: vwmul.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -121,6 +128,7 @@ ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vwmul.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -137,6 +145,7 @@ ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vwmul.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -154,6 +163,7 @@ ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) ; CHECK-NEXT: vwmul.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -170,6 +180,7 @@ ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vwmul.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -186,6 +197,7 @@ ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vwmul.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -203,6 +215,7 @@ ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) ; CHECK-NEXT: vwmul.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = load <64 x i8>, <64 x i8>* %y @@ -220,6 +233,7 @@ ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vwmul.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i16>, <32 x i16>* %x %b = load <32 x i16>, <32 x i16>* %y @@ -236,6 +250,7 @@ ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vwmul.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i32>, <16 x i32>* %x %b = load <16 x i32>, <16 x i32>* %y @@ -272,6 +287,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = load <128 x i8>, <128 x i8>* %y @@ -308,6 +324,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i16>, <64 x i16>* %x %b = load <64 x i16>, <64 x i16>* %y @@ -343,6 +360,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i32>, <32 x i32>* %x %b = load <32 x i32>, <32 x i32>* %y @@ -362,6 +380,7 @@ ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: 
vsext.vf2 v11, v9 ; CHECK-NEXT: vwmul.vv v8, v11, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i8>, <2 x i8>* %x %b = load <2 x i8>, <2 x i8>* %y @@ -380,6 +399,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vwmul.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %b = load <4 x i16>, <4 x i16>* %y @@ -397,6 +417,7 @@ ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vsext.vf4 v11, v8 ; CHECK-NEXT: vwmul.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i8>, <4 x i8>* %y @@ -412,6 +433,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmul.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i8>, <2 x i8>* %x %b = insertelement <2 x i8> undef, i8 %y, i32 0 @@ -428,6 +450,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmul.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %b = insertelement <4 x i8> undef, i8 %y, i32 0 @@ -444,6 +467,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwmul.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i16>, <2 x i16>* %x %b = insertelement <2 x i16> undef, i16 %y, i32 0 @@ -460,6 +484,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmul.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -476,6 +501,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwmul.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = insertelement <4 x i16> undef, i16 %y, i32 0 @@ -492,6 +518,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vwmul.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = insertelement <2 x i32> undef, i32 %y, i64 0 @@ -508,6 +535,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vwmul.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -524,6 +552,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vwmul.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -540,6 +569,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vwmul.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i64 0 @@ -557,6 +587,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwmul.vx v8, v12, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -573,6 +604,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vwmul.vx v8, v12, a1 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = insertelement <16 x i16> undef, i16 %y, i32 0 @@ -589,6 +621,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vwmul.vx v8, v12, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = insertelement <8 x i32> undef, i32 %y, i64 0 @@ -606,6 +639,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwmul.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -623,6 +657,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwmul.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i16>, <32 x i16>* %x %b = insertelement <32 x i16> undef, i16 %y, i32 0 @@ -639,6 +674,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vwmul.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i32>, <16 x i32>* %x %b = insertelement <16 x i32> undef, i32 %y, i64 0 @@ -656,6 +692,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: lb a0, 0(a1) ; CHECK-NEXT: vwmul.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load i8, i8* %y @@ -676,6 +713,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmul.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load i16, i16* %y @@ -693,6 +731,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: lb a0, 0(a1) ; CHECK-NEXT: vwmul.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = load i8, i8* %y @@ -711,6 +750,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: lh a0, 0(a1) ; CHECK-NEXT: vwmul.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = load i16, i16* %y @@ -731,6 +771,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmul.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = load i32, i32* %y @@ -758,6 +799,7 @@ ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmul_vx_v2i64_i8: @@ -766,6 +808,7 @@ ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: lb a0, 0(a1) ; RV64-NEXT: vwmul.vx v8, v9, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = load i8, i8* %y @@ -794,6 +837,7 @@ ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmul_vx_v2i64_i16: @@ -802,6 +846,7 @@ ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: lh a0, 0(a1) ; RV64-NEXT: vwmul.vx v8, v9, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = load i16, i16* %y @@ -830,6 +875,7 @@ ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmul_vx_v2i64_i32: @@ -838,6 +884,7 @@ ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: lw a0, 0(a1) ; RV64-NEXT: vwmul.vx v8, v9, a0 +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = load i32, i32* %y @@ -866,6 +913,7 @@ ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmul_vx_v2i64_i64: @@ -876,6 +924,7 @@ ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vsext.vf2 v9, v8 ; RV64-NEXT: vmul.vx v8, v9, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = load i64, i64* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll @@ -9,6 +9,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmulu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i8>, <2 x i8>* %x %b = load <2 x i8>, <2 x i8>* %y @@ -25,6 +26,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmulu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %b = load <4 x i8>, <4 x i8>* %y @@ -41,6 +43,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmulu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i16>, <2 x i16>* %x %b = load <2 x i16>, <2 x i16>* %y @@ -57,6 +60,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmulu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load <8 x i8>, <8 x i8>* %y @@ -73,6 +77,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmulu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = load <4 x i16>, <4 x i16>* %y @@ -89,6 +94,7 @@ ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vwmulu.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = load <2 x i32>, <2 x i32>* %y @@ -105,6 +111,7 @@ ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vle8.v v11, (a1) ; CHECK-NEXT: vwmulu.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -121,6 +128,7 @@ ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vwmulu.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = load <8 x i16>, <8 x i16>* %y @@ -137,6 +145,7 @@ ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vwmulu.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i32>, <4 x i32>* %y @@ -154,6 +163,7 @@ ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) ; CHECK-NEXT: vwmulu.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ -170,6 +180,7 @@ ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vwmulu.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = load <16 x i16>, <16 x i16>* %y @@ -186,6 +197,7 @@ ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: 
vwmulu.vv v8, v12, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = load <8 x i32>, <8 x i32>* %y @@ -203,6 +215,7 @@ ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) ; CHECK-NEXT: vwmulu.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = load <64 x i8>, <64 x i8>* %y @@ -220,6 +233,7 @@ ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vwmulu.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i16>, <32 x i16>* %x %b = load <32 x i16>, <32 x i16>* %y @@ -236,6 +250,7 @@ ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vwmulu.vv v8, v16, v20 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i32>, <16 x i32>* %x %b = load <16 x i32>, <16 x i32>* %y @@ -272,6 +287,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = load <128 x i8>, <128 x i8>* %y @@ -308,6 +324,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i16>, <64 x i16>* %x %b = load <64 x i16>, <64 x i16>* %y @@ -343,6 +360,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i32>, <32 x i32>* %x %b = load <32 x i32>, <32 x i32>* %y @@ -362,6 +380,7 @@ ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vzext.vf2 v11, v9 ; CHECK-NEXT: vwmulu.vv v8, v11, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i8>, <2 x i8>* %x %b = load <2 x i8>, <2 x i8>* %y @@ -380,6 +399,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vwmulu.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %b = load <4 x i16>, <4 x i16>* %y @@ -397,6 +417,7 @@ ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vzext.vf4 v11, v8 ; CHECK-NEXT: vwmulu.vv v8, v10, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = load <4 x i8>, <4 x i8>* %y @@ -412,6 +433,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmulu.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i8>, <2 x i8>* %x %b = insertelement <2 x i8> undef, i8 %y, i32 0 @@ -428,6 +450,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmulu.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x %b = insertelement <4 x i8> undef, i8 %y, i32 0 @@ -444,6 +467,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwmulu.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i16>, <2 x i16>* %x %b = insertelement <2 x i16> undef, i16 %y, i32 0 @@ -460,6 +484,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmulu.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -476,6 +501,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: 
vwmulu.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = insertelement <4 x i16> undef, i16 %y, i32 0 @@ -492,6 +518,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vwmulu.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <2 x i32>, <2 x i32>* %x %b = insertelement <2 x i32> undef, i32 %y, i64 0 @@ -508,6 +535,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vwmulu.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -524,6 +552,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vwmulu.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %b = insertelement <8 x i16> undef, i16 %y, i32 0 @@ -540,6 +569,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vwmulu.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i32>, <4 x i32>* %x %b = insertelement <4 x i32> undef, i32 %y, i64 0 @@ -557,6 +587,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwmulu.vx v8, v12, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -573,6 +604,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vwmulu.vx v8, v12, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i16>, <16 x i16>* %x %b = insertelement <16 x i16> undef, i16 %y, i32 0 @@ -589,6 +621,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vwmulu.vx v8, v12, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %b = insertelement <8 x i32> undef, i32 %y, i64 0 @@ -606,6 +639,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwmulu.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -623,6 +657,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwmulu.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <32 x i16>, <32 x i16>* %x %b = insertelement <32 x i16> undef, i16 %y, i32 0 @@ -639,6 +674,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vwmulu.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <16 x i32>, <16 x i32>* %x %b = insertelement <16 x i32> undef, i32 %y, i64 0 @@ -656,6 +692,7 @@ ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: lbu a0, 0(a1) ; CHECK-NEXT: vwmulu.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load i8, i8* %y @@ -676,6 +713,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmul.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load i16, i16* %y @@ -693,6 +731,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: lbu a0, 0(a1) ; CHECK-NEXT: vwmulu.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %a = load <4 x i16>, <4 x i16>* %x %b = load i8, i8* %y @@ -711,6 +750,7 @@ ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: lhu a0, 0(a1) ; CHECK-NEXT: vwmulu.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = load i16, i16* %y @@ -731,6 +771,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmul.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x %b = load i32, i32* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.xor.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) ret <8 x i7> %v @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) ret <2 x i8> %v @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 7, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 7, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -97,6 +104,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 -1, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -109,6 +117,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> undef, i8 -1, i32 0 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer @@ -125,6 +134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) ret <4 x i8> %v @@ -135,6 +145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -147,6 +158,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -173,6 +186,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 7, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -185,6 +199,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 7, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -199,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 -1, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -211,6 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> undef, i8 -1, i32 0 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer @@ -227,6 +244,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) ret <8 x i8> %v @@ -237,6 +255,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -249,6 +268,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -261,6 +281,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0 %vb = 
shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -275,6 +296,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 7, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -287,6 +309,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 7, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -301,6 +324,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 -1, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -313,6 +337,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> undef, i8 -1, i32 0 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer @@ -329,6 +354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 %evl) ret <9 x i8> %v @@ -339,6 +365,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <9 x i1> undef, i1 true, i32 0 %m = shufflevector <9 x i1> %head, <9 x i1> undef, <9 x i32> zeroinitializer @@ -351,6 +378,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> undef, i8 %b, i32 0 %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer @@ -363,6 +391,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> undef, i8 %b, i32 0 %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer @@ -377,6 +406,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> undef, i8 7, i32 0 %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer @@ -389,6 +419,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> undef, i8 7, i32 0 %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer @@ -403,6 +434,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> undef, i8 -1, i32 0 %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer @@ -415,6 +447,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; 
CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> undef, i8 -1, i32 0 %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer @@ -431,6 +464,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) ret <16 x i8> %v @@ -441,6 +475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -453,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -465,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -479,6 +516,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 7, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -491,6 +529,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 7, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -505,6 +544,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 -1, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -517,6 +557,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> undef, i8 -1, i32 0 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer @@ -533,6 +574,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) ret <2 x i16> %v @@ -543,6 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -555,6 +598,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = 
shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -567,6 +611,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -581,6 +626,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 7, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -593,6 +639,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 7, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -607,6 +654,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 -1, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -619,6 +667,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> undef, i16 -1, i32 0 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer @@ -635,6 +684,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) ret <4 x i16> %v @@ -645,6 +695,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -657,6 +708,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -669,6 +721,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -683,6 +736,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 7, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -695,6 +749,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 7, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -709,6 +764,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 -1, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -721,6 +777,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> undef, i16 -1, i32 0 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer @@ -737,6 +794,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -747,6 +805,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -759,6 +818,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -771,6 +831,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -785,6 +846,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 7, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -797,6 +859,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 7, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -811,6 +874,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 -1, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -823,6 +887,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> undef, i16 -1, i32 0 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer @@ -839,6 +904,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) ret <16 x i16> %v @@ -849,6 +915,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 
; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -861,6 +928,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -873,6 +941,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -887,6 +956,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 7, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -899,6 +969,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 7, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -913,6 +984,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 -1, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -925,6 +997,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> undef, i16 -1, i32 0 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer @@ -941,6 +1014,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) ret <2 x i32> %v @@ -951,6 +1025,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -963,6 +1038,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -975,6 +1051,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -989,6 +1066,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 7, i32 0 %vb = 
shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -1001,6 +1079,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 7, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -1015,6 +1094,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 -1, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -1027,6 +1107,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> undef, i32 -1, i32 0 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer @@ -1043,6 +1124,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -1053,6 +1135,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1065,6 +1148,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -1077,6 +1161,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -1091,6 +1176,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 7, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -1103,6 +1189,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 7, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -1117,6 +1204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 -1, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -1129,6 +1217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> undef, i32 -1, i32 0 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer @@ -1145,6 
+1234,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -1155,6 +1245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -1167,6 +1258,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -1179,6 +1271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -1193,6 +1286,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 7, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -1205,6 +1299,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 7, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -1219,6 +1314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 -1, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -1231,6 +1327,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> undef, i32 -1, i32 0 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer @@ -1247,6 +1344,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) ret <16 x i32> %v @@ -1257,6 +1355,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -1269,6 +1368,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -1281,6 +1381,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -1295,6 +1396,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 7, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -1307,6 +1409,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 7, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -1321,6 +1424,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 -1, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -1333,6 +1437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> undef, i32 -1, i32 0 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer @@ -1349,6 +1454,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) ret <2 x i64> %v @@ -1359,6 +1465,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <2 x i1> undef, i1 true, i32 0 %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer @@ -1379,12 +1486,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1405,12 +1514,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1425,6 +1536,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 7, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1437,6 +1549,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 
7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 7, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1451,6 +1564,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 -1, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1463,6 +1577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> undef, i64 -1, i32 0 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer @@ -1479,6 +1594,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) ret <4 x i64> %v @@ -1489,6 +1605,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <4 x i1> undef, i1 true, i32 0 %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1509,12 +1626,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1535,12 +1654,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1555,6 +1676,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 7, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1567,6 +1689,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 7, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1581,6 +1704,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 -1, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1593,6 +1717,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> undef, i64 -1, i32 0 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer @@ -1609,6 +1734,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) ret <8 x i64> %v @@ -1619,6 +1745,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement <8 x i1> undef, i1 true, i32 0 %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer @@ -1639,12 +1766,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1665,12 +1794,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1685,6 +1816,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 7, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1697,6 +1829,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 7, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1711,6 +1844,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 -1, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1723,6 +1857,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> undef, i64 -1, i32 0 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer @@ -1739,6 +1874,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) ret <16 x i64> %v @@ -1749,6 +1885,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %head = insertelement <16 x i1> undef, i1 true, i32 0 %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer @@ -1769,12 +1906,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v16i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1795,12 +1934,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v16i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1815,6 +1956,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 7, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1827,6 +1969,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 7, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1841,6 +1984,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 -1, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer @@ -1853,6 +1997,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> undef, i64 -1, i32 0 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir b/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir --- a/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir +++ b/llvm/test/CodeGen/RISCV/rvv/get-vlen-debugloc.mir @@ -35,6 +35,7 @@ ; CHECK-NEXT: $x10 = frame-destroy PseudoReadVLENB ; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 1 ; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10 + ; CHECK-NEXT: CFI_INSTRUCTION def_cfa_offset 0 ; CHECK-NEXT: PseudoRET bb.0: bb.1: diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll @@ -6,6 +6,7 @@ ; CHECK-LABEL: insert_nxv8i32_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv4i32.nxv8i32( %vec, %subvec, i64 0) ret %v @@ -15,6 +16,7 @@ ; CHECK-LABEL: insert_nxv8i32_nxv4i32_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v10, v12 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv4i32.nxv8i32( %vec, %subvec, i64 4) ret %v @@ -24,6 +26,7 @@ ; CHECK-LABEL: insert_nxv8i32_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 0) ret %v @@ -33,6 +36,7 @@ ; CHECK-LABEL: insert_nxv8i32_nxv2i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 2) ret %v @@ -42,6 +46,7 @@ ; CHECK-LABEL: insert_nxv8i32_nxv2i32_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 4) ret %v @@ -51,6 +56,7 @@ ; CHECK-LABEL: insert_nxv8i32_nxv2i32_6: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v11, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 6) ret %v @@ -63,6 +69,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 0) ret %v @@ -78,6 +85,7 @@ ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 3) ret %v @@ -87,6 +95,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv8i32.nxv16i32( %vec, %subvec, i64 0) ret %v @@ -96,6 +105,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv8i32_8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv4r.v v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv8i32.nxv16i32( %vec, %subvec, i64 8) ret %v @@ -105,6 +115,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 0) ret %v @@ -114,6 +125,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv4i32_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v10, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 4) ret %v @@ -123,6 +135,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv4i32_8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 8) ret %v @@ -132,6 +145,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv4i32_12: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v14, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 12) ret %v @@ -141,6 +155,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 0) ret %v @@ -150,6 +165,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv2i32_2: ; CHECK: # %bb.0: ; 
CHECK-NEXT: vmv1r.v v9, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 2) ret %v @@ -159,6 +175,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv2i32_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v10, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 4) ret %v @@ -168,6 +185,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv2i32_6: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v11, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 6) ret %v @@ -177,6 +195,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv2i32_8: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 8) ret %v @@ -186,6 +205,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv2i32_10: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v13, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 10) ret %v @@ -195,6 +215,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv2i32_12: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v14, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 12) ret %v @@ -204,6 +225,7 @@ ; CHECK-LABEL: insert_nxv16i32_nxv2i32_14: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v15, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 14) ret %v @@ -216,6 +238,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 0) ret %v @@ -229,6 +252,7 @@ ; CHECK-NEXT: add a1, a0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 1) ret %v @@ -241,6 +265,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v11, v16, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 6) ret %v @@ -253,6 +278,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 0) ret %v @@ -266,6 +292,7 @@ ; CHECK-NEXT: add a1, a0, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 1) ret %v @@ -280,6 +307,7 @@ ; CHECK-NEXT: add a1, a0, a1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 2) ret %v @@ -295,6 +323,7 @@ ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 
; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 3) ret %v @@ -309,6 +338,7 @@ ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 7) ret %v @@ -323,6 +353,7 @@ ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v9, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 15) ret %v @@ -335,6 +366,7 @@ ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 0) ret %v @@ -348,6 +380,7 @@ ; CHECK-NEXT: add a1, a0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 2) ret %v @@ -361,6 +394,7 @@ ; CHECK-NEXT: add a1, a0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v14, v16, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 26) ret %v @@ -370,6 +404,7 @@ ; CHECK-LABEL: insert_nxv32f16_undef_nxv1f16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8m8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1f16.nxv32f16( undef, %subvec, i64 0) ret %v @@ -385,6 +420,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslideup.vx v22, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv1f16.nxv32f16( undef, %subvec, i64 26) ret %v @@ -397,6 +433,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vec = call @llvm.experimental.vector.insert.nxv8i1.nxv32i1( %v, %sv, i64 0) ret %vec @@ -410,6 +447,7 @@ ; CHECK-NEXT: add a1, a0, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vec = call @llvm.experimental.vector.insert.nxv8i1.nxv32i1( %v, %sv, i64 8) ret %vec @@ -431,6 +469,7 @@ ; CHECK-NEXT: vslideup.vi v9, v8, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vec = call @llvm.experimental.vector.insert.nxv1i1.nxv4i1( %v, %sv, i64 0) ret %vec @@ -454,6 +493,7 @@ ; CHECK-NEXT: vslideup.vx v9, v8, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vec = call @llvm.experimental.vector.insert.nxv1i1.nxv4i1( %v, %sv, i64 2) ret %vec @@ -469,6 +509,7 @@ ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vs8r.v v16, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v0 = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 0) %v = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( %v0, %sv1, i64 8) @@ -480,6 +521,7 @@ ; CHECK-LABEL: 
insert_nxv8i64_nxv16i64_lo: ; CHECK: # %bb.0: ; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 0) store %v, * %out @@ -493,6 +535,7 @@ ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 8) store %v, * %out diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -19,6 +20,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -32,6 +34,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -42,6 +45,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -54,6 +58,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -67,6 +72,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -77,6 +83,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -89,6 +96,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -102,6 +110,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -112,6 +121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -124,6 +134,7 @@ ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -137,6 +148,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -147,6 +159,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, 
e16, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -159,6 +172,7 @@ ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -172,6 +186,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -182,6 +197,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -194,6 +210,7 @@ ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -207,6 +224,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -217,6 +235,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 ret %r @@ -229,6 +248,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 ret %r @@ -242,6 +262,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx ret %r @@ -252,6 +273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 ret %r @@ -264,6 +286,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 ret %r @@ -277,6 +300,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx ret %r @@ -287,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 ret %r @@ -299,6 +324,7 @@ ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 ret %r @@ -312,6 +338,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx ret %r @@ -322,6 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 ret %r @@ -334,6 +362,7 @@ ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 ret %r @@ -347,6 +376,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx ret %r @@ -357,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 ret %r @@ -369,6 +400,7 @@ ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 ret %r @@ -382,6 +414,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx ret %r @@ -392,6 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 ret %r @@ -404,6 +438,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 ret %r @@ -417,6 +452,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx ret %r @@ -427,6 +463,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 ret %r @@ -439,6 +476,7 @@ ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 ret %r @@ -452,6 +490,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx ret %r @@ -462,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 ret %r @@ -474,6 +514,7 @@ ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 ret %r @@ -487,6 +528,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx ret %r @@ -497,6 +539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 ret %r @@ -509,6 +552,7 @@ ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 ret %r @@ -522,6 +566,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx ret %r diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -19,6 +20,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -32,6 +34,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -42,6 +45,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -54,6 +58,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -67,6 +72,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -77,6 +83,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -89,6 +96,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -102,6 +110,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -112,6 +121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -124,6 +134,7 @@ ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -137,6 +148,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -147,6 
+159,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -159,6 +172,7 @@ ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -172,6 +186,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -182,6 +197,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 ret %r @@ -194,6 +210,7 @@ ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 ret %r @@ -207,6 +224,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx ret %r @@ -217,6 +235,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 ret %r @@ -229,6 +248,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 ret %r @@ -242,6 +262,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx ret %r @@ -252,6 +273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 ret %r @@ -264,6 +286,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 ret %r @@ -277,6 +300,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx ret %r @@ -287,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 ret %r @@ -299,6 +324,7 @@ ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 ret %r @@ -312,6 +338,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx ret %r @@ -322,6 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, 
zero, e32, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 ret %r @@ -334,6 +362,7 @@ ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 ret %r @@ -347,6 +376,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx ret %r @@ -357,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 ret %r @@ -369,6 +400,7 @@ ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 ret %r @@ -382,6 +414,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx ret %r @@ -392,6 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 ret %r @@ -404,6 +438,7 @@ ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 ret %r @@ -417,6 +452,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx ret %r @@ -427,6 +463,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 ret %r @@ -439,6 +476,7 @@ ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 ret %r @@ -452,6 +490,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx ret %r @@ -462,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu ; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 ret %r @@ -474,6 +514,7 @@ ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 ret %r @@ -487,6 +528,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx ret %r @@ -497,6 +539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu ; CHECK-NEXT: 
vfmv.s.f v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 ret %r @@ -509,6 +552,7 @@ ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 ret %r @@ -522,6 +566,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx ret %r diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 2 ret %y @@ -32,6 +33,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 %idx ret %y @@ -49,6 +51,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 2 ret %y @@ -67,6 +70,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 %idx ret %y @@ -84,6 +88,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 2 ret %y @@ -102,6 +107,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 %idx ret %y @@ -119,6 +125,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 2 ret %y @@ -137,6 +144,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 %idx ret %y @@ -154,6 +162,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 2 ret %y @@ -172,6 +181,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 %idx ret %y @@ -189,6 +199,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 2 ret %y @@ -207,6 +218,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 %idx ret %y @@ -224,6 +236,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 2 ret %y @@ -242,6 +255,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %y = insertelement %x, i1 %elt, i64 %idx ret %y diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -19,6 +20,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -32,6 +34,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -42,6 +45,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -54,6 +58,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -67,6 +72,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -77,6 +83,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -89,6 +96,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -102,6 +110,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -112,6 +121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -124,6 +134,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -137,6 +148,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -147,6 +159,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, 
m2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -159,6 +172,7 @@ ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -172,6 +186,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -182,6 +197,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -194,6 +210,7 @@ ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -207,6 +224,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -217,6 +235,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -229,6 +248,7 @@ ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -242,6 +262,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -252,6 +273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 ret %r @@ -264,6 +286,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -277,6 +300,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -287,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 ret %r @@ -299,6 +324,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -312,6 +338,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -322,6 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement 
%v, i16 %elt, i32 0 ret %r @@ -334,6 +362,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -347,6 +376,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -357,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 ret %r @@ -369,6 +400,7 @@ ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -382,6 +414,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -392,6 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 ret %r @@ -404,6 +438,7 @@ ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -417,6 +452,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -427,6 +463,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 ret %r @@ -439,6 +476,7 @@ ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -452,6 +490,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -462,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 ret %r @@ -474,6 +514,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 ret %r @@ -487,6 +528,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx ret %r @@ -497,6 +539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 ret %r @@ -509,6 +552,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli 
zero, 4, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 ret %r @@ -522,6 +566,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx ret %r @@ -532,6 +577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 ret %r @@ -544,6 +590,7 @@ ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 ret %r @@ -557,6 +604,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx ret %r @@ -567,6 +615,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 ret %r @@ -579,6 +628,7 @@ ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 ret %r @@ -592,6 +642,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx ret %r @@ -602,6 +653,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 ret %r @@ -614,6 +666,7 @@ ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 ret %r @@ -627,6 +680,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx ret %r @@ -641,6 +695,7 @@ ; CHECK-NEXT: vslide1up.vx v9, v10, a0 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 ret %r @@ -655,6 +710,7 @@ ; CHECK-NEXT: vslide1up.vx v9, v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 ret %r @@ -670,6 +726,7 @@ ; CHECK-NEXT: addi a0, a2, 1 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx ret %r @@ -684,6 +741,7 @@ ; CHECK-NEXT: vslide1up.vx v10, v12, a0 ; CHECK-NEXT: vsetivli zero, 1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 ret %r @@ -698,6 +756,7 @@ ; CHECK-NEXT: vslide1up.vx v10, v12, a0 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; 
CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 ret %r @@ -713,6 +772,7 @@ ; CHECK-NEXT: addi a0, a2, 1 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx ret %r @@ -727,6 +787,7 @@ ; CHECK-NEXT: vslide1up.vx v12, v16, a0 ; CHECK-NEXT: vsetivli zero, 1, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 ret %r @@ -741,6 +802,7 @@ ; CHECK-NEXT: vslide1up.vx v12, v16, a0 ; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 ret %r @@ -756,6 +818,7 @@ ; CHECK-NEXT: addi a0, a2, 1 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx ret %r @@ -770,6 +833,7 @@ ; CHECK-NEXT: vslide1up.vx v16, v24, a0 ; CHECK-NEXT: vsetivli zero, 1, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 ret %r @@ -784,6 +848,7 @@ ; CHECK-NEXT: vslide1up.vx v16, v24, a0 ; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 ret %r @@ -799,6 +864,7 @@ ; CHECK-NEXT: addi a0, a2, 1 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx ret %r @@ -811,6 +877,7 @@ ; CHECK-NEXT: addi a0, zero, 10 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 10, i32 0 ret %r @@ -824,6 +891,7 @@ ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 10, i32 3 ret %r @@ -838,6 +906,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 10, i32 %idx ret %r @@ -849,6 +918,7 @@ ; CHECK-NEXT: addi a0, zero, -1 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 -1, i32 0 ret %r @@ -862,6 +932,7 @@ ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 -1, i32 3 ret %r @@ -876,6 +947,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 -1, i32 %idx ret %r diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, tu, mu ; 
CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -19,6 +20,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -32,6 +34,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -42,6 +45,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -54,6 +58,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -67,6 +72,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -77,6 +83,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -89,6 +96,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -102,6 +110,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -112,6 +121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -124,6 +134,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -137,6 +148,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -147,6 +159,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -159,6 +172,7 @@ ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -172,6 +186,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -182,6 +197,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -194,6 +210,7 @@ 
; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -207,6 +224,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -217,6 +235,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 ret %r @@ -229,6 +248,7 @@ ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 ret %r @@ -242,6 +262,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx ret %r @@ -252,6 +273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 ret %r @@ -264,6 +286,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -277,6 +300,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -287,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 ret %r @@ -299,6 +324,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -312,6 +338,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -322,6 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 ret %r @@ -334,6 +362,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -347,6 +376,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -357,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 ret %r @@ -369,6 +400,7 @@ ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 
3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -382,6 +414,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -392,6 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 ret %r @@ -404,6 +438,7 @@ ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -417,6 +452,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -427,6 +463,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 ret %r @@ -439,6 +476,7 @@ ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 ret %r @@ -452,6 +490,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx ret %r @@ -462,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 ret %r @@ -474,6 +514,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 ret %r @@ -487,6 +528,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx ret %r @@ -497,6 +539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 ret %r @@ -509,6 +552,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 ret %r @@ -522,6 +566,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx ret %r @@ -532,6 +577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 ret %r @@ -544,6 +590,7 @@ ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 ret %r 
@@ -557,6 +604,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx ret %r @@ -567,6 +615,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 ret %r @@ -579,6 +628,7 @@ ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 ret %r @@ -592,6 +642,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu ; CHECK-NEXT: vslideup.vx v8, v12, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx ret %r @@ -602,6 +653,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 ret %r @@ -614,6 +666,7 @@ ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 ret %r @@ -627,6 +680,7 @@ ; CHECK-NEXT: addi a0, a1, 1 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx ret %r @@ -637,6 +691,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 ret %r @@ -649,6 +704,7 @@ ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 ret %r @@ -663,6 +719,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx ret %r @@ -673,6 +730,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 ret %r @@ -685,6 +743,7 @@ ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 ret %r @@ -699,6 +758,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx ret %r @@ -709,6 +769,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, mu ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 ret %r @@ -721,6 +782,7 @@ ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu ; CHECK-NEXT: vslideup.vi v8, v12, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 ret %r @@ -735,6 +797,7 @@ ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; 
CHECK-NEXT: vslideup.vx v8, v12, a0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%r = insertelement %v, i64 %elt, i32 %idx
ret %r
@@ -745,6 +808,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, mu
; CHECK-NEXT: vmv.s.x v8, a0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%r = insertelement %v, i64 %elt, i32 0
ret %r
@@ -757,6 +821,7 @@
; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%r = insertelement %v, i64 %elt, i32 3
ret %r
@@ -771,6 +836,7 @@
; CHECK-NEXT: addi a1, a0, 1
; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%r = insertelement %v, i64 %elt, i32 %idx
ret %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
--- a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
@@ -49,6 +49,7 @@
; RV64-1024-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; RV64-1024-NEXT: vrgather.vv v16, v8, v12, v0.t
; RV64-1024-NEXT: vse16.v v16, (a0)
+; RV64-1024-NEXT: .cfi_def_cfa_offset 0
; RV64-1024-NEXT: ret
;
; RV64-2048-LABEL: interleave256:
@@ -97,6 +98,7 @@
; RV64-2048-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; RV64-2048-NEXT: vrgather.vv v12, v8, v10, v0.t
; RV64-2048-NEXT: vse16.v v12, (a0)
+; RV64-2048-NEXT: .cfi_def_cfa_offset 0
; RV64-2048-NEXT: ret
entry:
%ve = load <128 x i16>, <128 x i16>* %0, align 256
@@ -229,6 +231,7 @@
; RV64-1024-NEXT: mul a0, a0, a1
; RV64-1024-NEXT: add sp, sp, a0
; RV64-1024-NEXT: addi sp, sp, 16
+; RV64-1024-NEXT: .cfi_def_cfa_offset 0
; RV64-1024-NEXT: ret
;
; RV64-2048-LABEL: interleave512:
@@ -285,6 +288,7 @@
; RV64-2048-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; RV64-2048-NEXT: vrgather.vv v16, v8, v12, v0.t
; RV64-2048-NEXT: vse16.v v16, (a0)
+; RV64-2048-NEXT: .cfi_def_cfa_offset 0
; RV64-2048-NEXT: ret
entry:
%ve = load <256 x i16>, <256 x i16>* %0, align 512
diff --git a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
--- a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
@@ -37,8 +37,12 @@
; CHECK-NEXT: sub sp, s0, a0
; CHECK-NEXT: addi sp, sp, 272
; CHECK-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload
+ ; CHECK-NEXT: .cfi_def_cfa sp, 2032
; CHECK-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
+ ; CHECK-NEXT: .cfi_restore ra
+ ; CHECK-NEXT: .cfi_restore s0
; CHECK-NEXT: addi sp, sp, 2032
+ ; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
ret void
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
--- a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
@@ -9,6 +9,7 @@
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = trunc %a to
ret %v
@@ -21,6 +22,7 @@
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = trunc %a to
ret %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/load-mask.ll b/llvm/test/CodeGen/RISCV/rvv/load-mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/load-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-mask.ll
@@ -10,6 +10,7 @@
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsm.v v8, (a1)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%a = load , * %pa
store %a, * %pb
@@ -22,6 +23,7 @@
; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsm.v v8, (a1)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%a = load , * %pa
store %a, * %pb
@@ -34,6 +36,7 @@
; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsm.v v8, (a1)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%a = load , * %pa
store %a, * %pb
@@ -46,6 +49,7 @@
; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsm.v v8, (a1)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%a = load , * %pa
store %a, * %pb
@@ -58,6 +62,7 @@
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsm.v v8, (a1)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%a = load , * %pa
store %a, * %pb
@@ -70,6 +75,7 @@
; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsm.v v8, (a1)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%a = load , * %pa
store %a, * %pb
@@ -82,6 +88,7 @@
; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsm.v v8, (a1)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%a = load , * %pa
store %a, * %pb
diff --git a/llvm/test/CodeGen/RISCV/rvv/localvar.ll b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
--- a/llvm/test/CodeGen/RISCV/rvv/localvar.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
@@ -21,6 +21,7 @@
; RV64IV-NEXT: slli a0, a0, 1
; RV64IV-NEXT: add sp, sp, a0
; RV64IV-NEXT: addi sp, sp, 16
+; RV64IV-NEXT: .cfi_def_cfa_offset 0
; RV64IV-NEXT: ret
%local0 = alloca
%local1 = alloca
@@ -47,6 +48,7 @@
; RV64IV-NEXT: slli a0, a0, 1
; RV64IV-NEXT: add sp, sp, a0
; RV64IV-NEXT: addi sp, sp, 16
+; RV64IV-NEXT: .cfi_def_cfa_offset 0
; RV64IV-NEXT: ret
%local0 = alloca
%local1 = alloca
@@ -74,6 +76,7 @@
; RV64IV-NEXT: slli a0, a0, 2
; RV64IV-NEXT: add sp, sp, a0
; RV64IV-NEXT: addi sp, sp, 16
+; RV64IV-NEXT: .cfi_def_cfa_offset 0
; RV64IV-NEXT: ret
%local0 = alloca
%local1 = alloca
@@ -106,8 +109,12 @@
; RV64IV-NEXT: vl4r.v v8, (a0)
; RV64IV-NEXT: addi sp, s0, -32
; RV64IV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IV-NEXT: .cfi_def_cfa sp, 32
; RV64IV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IV-NEXT: .cfi_restore ra
+; RV64IV-NEXT: .cfi_restore s0
; RV64IV-NEXT: addi sp, sp, 32
+; RV64IV-NEXT: .cfi_def_cfa_offset 0
; RV64IV-NEXT: ret
%local0 = alloca
%local1 = alloca
@@ -140,8 +147,12 @@
; RV64IV-NEXT: vl8r.v v8, (a0)
; RV64IV-NEXT: addi sp, s0, -64
; RV64IV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64IV-NEXT: .cfi_def_cfa sp, 64
; RV64IV-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64IV-NEXT: .cfi_restore ra
+; RV64IV-NEXT: .cfi_restore s0
; RV64IV-NEXT: addi sp, sp, 64
+; RV64IV-NEXT: .cfi_def_cfa_offset 0
; RV64IV-NEXT: ret
%local0 = alloca
%local1 = alloca
@@ -171,6 +182,7 @@
; RV64IV-NEXT: slli a0, a0, 2
; RV64IV-NEXT: add sp, sp, a0
; RV64IV-NEXT: addi sp, sp, 32
+; RV64IV-NEXT: .cfi_def_cfa_offset 0
; RV64IV-NEXT: ret
%local_scalar0 = alloca i32
%local0 = alloca
@@ -218,8 +230,12 @@
; RV64IV-NEXT: vl2r.v v8, (a0)
; RV64IV-NEXT: addi sp, s0, -32
; RV64IV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IV-NEXT: .cfi_def_cfa sp, 32
; RV64IV-NEXT: ld ra, 24(sp) #
8-byte Folded Reload +; RV64IV-NEXT: .cfi_restore ra +; RV64IV-NEXT: .cfi_restore s0 ; RV64IV-NEXT: addi sp, sp, 32 +; RV64IV-NEXT: .cfi_def_cfa_offset 0 ; RV64IV-NEXT: ret %1 = alloca i8, i64 %n %2 = alloca @@ -270,8 +286,13 @@ ; RV64IV-NEXT: addi sp, s0, -256 ; RV64IV-NEXT: ld s1, 232(sp) # 8-byte Folded Reload ; RV64IV-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; RV64IV-NEXT: .cfi_def_cfa sp, 256 ; RV64IV-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; RV64IV-NEXT: .cfi_restore ra +; RV64IV-NEXT: .cfi_restore s0 +; RV64IV-NEXT: .cfi_restore s1 ; RV64IV-NEXT: addi sp, sp, 256 +; RV64IV-NEXT: .cfi_def_cfa_offset 0 ; RV64IV-NEXT: ret %1 = alloca i8, i64 %n %2 = alloca i32, align 128 diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll @@ -7,6 +7,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -18,6 +19,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -29,6 +31,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -40,6 +43,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -51,6 +55,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -62,6 +67,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -73,6 +79,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -84,6 +91,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -95,6 +103,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -106,6 +115,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -117,6 +127,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -128,6 +139,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -139,6 +151,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.v.i 
v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -150,6 +163,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -161,6 +175,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -172,6 +187,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -183,6 +199,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -194,6 +211,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -205,6 +223,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -216,6 +235,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -227,6 +247,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -238,6 +259,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -249,6 +271,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -260,6 +283,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -271,6 +295,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -282,6 +307,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -293,6 +319,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -304,6 +331,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -315,6 +343,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -326,6 +355,7 @@ ; 
CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -337,6 +367,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -348,6 +379,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -359,6 +391,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -370,6 +403,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -381,6 +415,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -392,6 +427,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -403,6 +439,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -414,6 +451,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -425,6 +463,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -436,6 +475,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -447,6 +487,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -458,6 +499,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -469,6 +511,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -480,6 +523,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -491,6 +535,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -502,6 +547,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -513,6 +559,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -524,6 +571,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -535,6 +583,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -546,6 +595,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -557,6 +607,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -568,6 +619,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -579,6 +631,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -590,6 +643,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -601,6 +655,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -612,6 +667,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -623,6 +679,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -634,6 +691,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -645,6 +703,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -656,6 +715,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -667,6 +727,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -678,6 +739,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -689,6 +751,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: 
vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -700,6 +763,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -711,6 +775,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -722,6 +787,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll @@ -7,6 +7,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -18,6 +19,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -29,6 +31,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -40,6 +43,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -51,6 +55,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -62,6 +67,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -73,6 +79,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -84,6 +91,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -95,6 +103,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -106,6 +115,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -117,6 +127,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -128,6 +139,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -139,6 
+151,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -150,6 +163,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -161,6 +175,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -172,6 +187,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -183,6 +199,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -194,6 +211,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -205,6 +223,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -216,6 +235,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -227,6 +247,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -238,6 +259,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -249,6 +271,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -260,6 +283,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -271,6 +295,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -282,6 +307,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -293,6 +319,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -304,6 +331,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -315,6 +343,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -326,6 +355,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -337,6 +367,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -348,6 +379,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -359,6 +391,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -370,6 +403,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -381,6 +415,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -392,6 +427,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -403,6 +439,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -414,6 +451,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -425,6 +463,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -436,6 +475,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -447,6 +487,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -458,6 +499,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -469,6 +511,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -480,6 +523,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -491,6 +535,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -502,6 +547,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: 
vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -513,6 +559,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -524,6 +571,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -535,6 +583,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -546,6 +595,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -557,6 +607,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -568,6 +619,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -579,6 +631,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -590,6 +643,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -601,6 +655,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -612,6 +667,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -623,6 +679,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -634,6 +691,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -645,6 +703,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -656,6 +715,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -667,6 +727,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -678,6 +739,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -689,6 
+751,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r @@ -700,6 +763,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = sext %v to ret %r @@ -711,6 +775,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = zext %v to ret %r @@ -722,6 +787,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = trunc %v to ret %r diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir b/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir --- a/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir +++ b/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir @@ -21,6 +21,7 @@ ; CHECK-NEXT: renamable $v0 = COPY killed renamable $v1 ; CHECK-NEXT: renamable $v9 = PseudoVMERGE_VIM_M1 killed renamable $v3, 1, killed renamable $v0, 1, 3, implicit $vl, implicit $vtype ; CHECK-NEXT: renamable $v0 = PseudoVADD_VV_M1 killed renamable $v8, killed renamable $v9, 1, 3, implicit $vl, implicit $vtype + ; CHECK-NEXT: CFI_INSTRUCTION def_cfa_offset 0 ; CHECK-NEXT: PseudoRET implicit $v0 %0:vr = COPY $v0 %1:vr = COPY $v1 diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll --- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll @@ -15,6 +15,7 @@ ; RV64IV-NEXT: addi a0, zero, 1024 ; RV64IV-NEXT: vsetvli zero, a0, e8, m8, tu, mu ; RV64IV-NEXT: vmacc.vv v8, v16, v24 +; RV64IV-NEXT: .cfi_def_cfa_offset 0 ; RV64IV-NEXT: ret %ret = call @llvm.riscv.vmacc.nxv64i8.nxv64i8( %arg0, @@ -60,8 +61,12 @@ ; RV64IV-NEXT: call callee@plt ; RV64IV-NEXT: addi sp, s0, -64 ; RV64IV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload +; RV64IV-NEXT: .cfi_def_cfa sp, 64 ; RV64IV-NEXT: ld ra, 56(sp) # 8-byte Folded Reload +; RV64IV-NEXT: .cfi_restore ra +; RV64IV-NEXT: .cfi_restore s0 ; RV64IV-NEXT: addi sp, sp, 64 +; RV64IV-NEXT: .cfi_def_cfa_offset 0 ; RV64IV-NEXT: ret %local0 = alloca %local1 = alloca diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -12,6 +12,7 @@ ; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1i8: @@ -19,6 +20,7 @@ ; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv1i8.nxv1p0i8( %ptrs, i32 1, %m, %passthru) ret %v @@ -32,6 +34,7 @@ ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i8: @@ -39,6 +42,7 @@ ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, 
%passthru) ret %v @@ -51,6 +55,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i16: @@ -59,6 +64,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV64-NEXT: vsext.vf2 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) %ev = sext %v to @@ -72,6 +78,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i16: @@ -80,6 +87,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV64-NEXT: vzext.vf2 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) %ev = zext %v to @@ -93,6 +101,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vsext.vf4 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i32: @@ -101,6 +110,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV64-NEXT: vsext.vf4 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) %ev = sext %v to @@ -114,6 +124,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vzext.vf4 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i32: @@ -122,6 +133,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV64-NEXT: vzext.vf4 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) %ev = zext %v to @@ -136,6 +148,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf8 v10, v9 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i64: @@ -144,6 +157,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vsext.vf8 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) %ev = sext %v to @@ -158,6 +172,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf8 v10, v9 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i64: @@ -166,6 +181,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vzext.vf8 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) %ev = zext %v to @@ -180,6 +196,7 @@ ; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4i8: @@ -187,6 +204,7 @@ ; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; 
RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i8.nxv4p0i8( %ptrs, i32 1, %m, %passthru) ret %v @@ -198,6 +216,7 @@ ; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i8: @@ -205,6 +224,7 @@ ; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -216,11 +236,13 @@ ; RV32-LABEL: mgather_falsemask_nxv4i8: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4i8: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i8.nxv4p0i8( %ptrs, i32 1, zeroinitializer, %passthru) ret %v @@ -234,6 +256,7 @@ ; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i8: @@ -241,6 +264,7 @@ ; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8i8.nxv8p0i8( %ptrs, i32 1, %m, %passthru) ret %v @@ -254,6 +278,7 @@ ; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v12, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8: @@ -263,6 +288,7 @@ ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v16, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, %idxs %v = call @llvm.masked.gather.nxv8i8.nxv8p0i8( %ptrs, i32 1, %m, %passthru) @@ -277,6 +303,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1i16: @@ -284,6 +311,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv1i16.nxv1p0i16( %ptrs, i32 2, %m, %passthru) ret %v @@ -297,6 +325,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i16: @@ -304,6 +333,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i16.nxv2p0i16( %ptrs, i32 2, %m, %passthru) ret %v @@ -316,6 +346,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i16_sextload_nxv2i32: @@ -324,6 +355,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV64-NEXT: vsext.vf2 v8, v10 +; RV64-NEXT: 
.cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i16.nxv2p0i16( %ptrs, i32 2, %m, %passthru) %ev = sext %v to @@ -337,6 +369,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i16_zextload_nxv2i32: @@ -345,6 +378,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV64-NEXT: vzext.vf2 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i16.nxv2p0i16( %ptrs, i32 2, %m, %passthru) %ev = zext %v to @@ -359,6 +393,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i16_sextload_nxv2i64: @@ -367,6 +402,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vsext.vf4 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i16.nxv2p0i16( %ptrs, i32 2, %m, %passthru) %ev = sext %v to @@ -381,6 +417,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf4 v10, v9 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i16_zextload_nxv2i64: @@ -389,6 +426,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vzext.vf4 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i16.nxv2p0i16( %ptrs, i32 2, %m, %passthru) %ev = zext %v to @@ -403,6 +441,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4i16: @@ -410,6 +449,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i16.nxv4p0i16( %ptrs, i32 2, %m, %passthru) ret %v @@ -421,6 +461,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i16: @@ -428,6 +469,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -439,11 +481,13 @@ ; RV32-LABEL: mgather_falsemask_nxv4i16: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4i16: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i16.nxv4p0i16( %ptrs, i32 2, zeroinitializer, %passthru) ret %v @@ -457,6 +501,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i16: @@ -464,6 +509,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v16 +; RV64-NEXT: 
.cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8i16.nxv8p0i16( %ptrs, i32 2, %m, %passthru) ret %v @@ -478,6 +524,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i16: @@ -488,6 +535,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs %v = call @llvm.masked.gather.nxv8i16.nxv8p0i16( %ptrs, i32 2, %m, %passthru) @@ -503,6 +551,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i16: @@ -513,6 +562,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i16, i16* %base, %eidxs @@ -529,6 +579,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i16: @@ -539,6 +590,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i16, i16* %base, %eidxs @@ -555,6 +607,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i16: @@ -565,6 +618,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs %v = call @llvm.masked.gather.nxv8i16.nxv8p0i16( %ptrs, i32 2, %m, %passthru) @@ -579,6 +633,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1i32: @@ -586,6 +641,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv1i32.nxv1p0i32( %ptrs, i32 4, %m, %passthru) ret %v @@ -599,6 +655,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i32: @@ -606,6 +663,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i32.nxv2p0i32( %ptrs, i32 4, %m, %passthru) ret %v @@ -619,6 +677,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf2 v10, v9 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i32_sextload_nxv2i64: @@ 
-627,6 +686,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vsext.vf2 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i32.nxv2p0i32( %ptrs, i32 4, %m, %passthru) %ev = sext %v to @@ -641,6 +701,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf2 v10, v9 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i32_zextload_nxv2i64: @@ -649,6 +710,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vzext.vf2 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i32.nxv2p0i32( %ptrs, i32 4, %m, %passthru) %ev = zext %v to @@ -663,6 +725,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4i32: @@ -670,6 +733,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i32.nxv4p0i32( %ptrs, i32 4, %m, %passthru) ret %v @@ -680,6 +744,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i32: @@ -687,6 +752,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -698,11 +764,13 @@ ; RV32-LABEL: mgather_falsemask_nxv4i32: ; RV32: # %bb.0: ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4i32: ; RV64: # %bb.0: ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i32.nxv4p0i32( %ptrs, i32 4, zeroinitializer, %passthru) ret %v @@ -716,6 +784,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i32: @@ -723,6 +792,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8i32.nxv8p0i32( %ptrs, i32 4, %m, %passthru) ret %v @@ -736,6 +806,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i32: @@ -746,6 +817,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs %v = call @llvm.masked.gather.nxv8i32.nxv8p0i32( %ptrs, i32 4, %m, %passthru) @@ -760,6 +832,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i32: @@ -770,6 +843,7 @@ ; 
RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -785,6 +859,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i32: @@ -795,6 +870,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -810,6 +886,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8i32: @@ -820,6 +897,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs %v = call @llvm.masked.gather.nxv8i32.nxv8p0i32( %ptrs, i32 4, %m, %passthru) @@ -834,6 +912,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i32: @@ -844,6 +923,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -859,6 +939,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i32: @@ -869,6 +950,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -883,6 +965,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i32: @@ -893,6 +976,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs %v = call @llvm.masked.gather.nxv8i32.nxv8p0i32( %ptrs, i32 4, %m, %passthru) @@ -907,6 +991,7 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1i64: @@ -914,6 +999,7 @@ ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv1i64.nxv1p0i64( %ptrs, i32 8, %m, %passthru) ret %v @@ -927,6 +1013,7 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; 
RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i64: @@ -934,6 +1021,7 @@ ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i64.nxv2p0i64( %ptrs, i32 8, %m, %passthru) ret %v @@ -947,6 +1035,7 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4i64: @@ -954,6 +1043,7 @@ ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i64.nxv4p0i64( %ptrs, i32 8, %m, %passthru) ret %v @@ -965,12 +1055,14 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8 ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -982,11 +1074,13 @@ ; RV32-LABEL: mgather_falsemask_nxv4i64: ; RV32: # %bb.0: ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i64.nxv4p0i64( %ptrs, i32 8, zeroinitializer, %passthru) ret %v @@ -1000,6 +1094,7 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i64: @@ -1007,6 +1102,7 @@ ; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) ret %v @@ -1021,6 +1117,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i64: @@ -1030,6 +1127,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) @@ -1044,6 +1142,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i64: @@ -1053,6 +1152,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1068,6 +1168,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i64: @@ -1077,6 +1178,7 @@ ; 
RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1093,6 +1195,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8i64: @@ -1102,6 +1205,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) @@ -1116,6 +1220,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i64: @@ -1125,6 +1230,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1140,6 +1246,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i64: @@ -1149,6 +1256,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1164,6 +1272,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8i64: @@ -1173,6 +1282,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) @@ -1187,6 +1297,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8i64: @@ -1196,6 +1307,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1211,6 +1323,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8i64: @@ -1220,6 +1333,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1234,6 +1348,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i64: @@ -1242,6 
+1357,7 @@ ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) @@ -1269,6 +1385,7 @@ ; RV32-NEXT: add a0, a1, a0 ; RV32-NEXT: vs8r.v v24, (a0) ; RV32-NEXT: vs8r.v v16, (a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv16i64: @@ -1301,6 +1418,7 @@ ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %p0 = call @llvm.experimental.vector.insert.nxv8p0i64.nxv16p0i64( undef, %ptrs0, i64 0) %p1 = call @llvm.experimental.vector.insert.nxv8p0i64.nxv16p0i64( %p0, %ptrs1, i64 8) @@ -1322,6 +1440,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1f16: @@ -1329,6 +1448,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv1f16.nxv1p0f16( %ptrs, i32 2, %m, %passthru) ret %v @@ -1342,6 +1462,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2f16: @@ -1349,6 +1470,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2f16.nxv2p0f16( %ptrs, i32 2, %m, %passthru) ret %v @@ -1362,6 +1484,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4f16: @@ -1369,6 +1492,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4f16.nxv4p0f16( %ptrs, i32 2, %m, %passthru) ret %v @@ -1380,6 +1504,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4f16: @@ -1387,6 +1512,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1398,11 +1524,13 @@ ; RV32-LABEL: mgather_falsemask_nxv4f16: ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4f16.nxv4p0f16( %ptrs, i32 2, zeroinitializer, %passthru) ret %v @@ -1416,6 +1544,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8f16: @@ -1423,6 +1552,7 @@ ; RV64-NEXT: 
vsetvli a0, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8f16.nxv8p0f16( %ptrs, i32 2, %m, %passthru) ret %v @@ -1437,6 +1567,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f16: @@ -1447,6 +1578,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs %v = call @llvm.masked.gather.nxv8f16.nxv8p0f16( %ptrs, i32 2, %m, %passthru) @@ -1462,6 +1594,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f16: @@ -1472,6 +1605,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds half, half* %base, %eidxs @@ -1488,6 +1622,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f16: @@ -1498,6 +1633,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds half, half* %base, %eidxs @@ -1514,6 +1650,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8f16: @@ -1524,6 +1661,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs %v = call @llvm.masked.gather.nxv8f16.nxv8p0f16( %ptrs, i32 2, %m, %passthru) @@ -1538,6 +1676,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1f32: @@ -1545,6 +1684,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv1f32.nxv1p0f32( %ptrs, i32 4, %m, %passthru) ret %v @@ -1558,6 +1698,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2f32: @@ -1565,6 +1706,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2f32.nxv2p0f32( %ptrs, i32 4, %m, %passthru) ret %v @@ -1578,6 +1720,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; 
RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4f32: @@ -1585,6 +1728,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4f32.nxv4p0f32( %ptrs, i32 4, %m, %passthru) ret %v @@ -1595,6 +1739,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4f32: @@ -1602,6 +1747,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1613,11 +1759,13 @@ ; RV32-LABEL: mgather_falsemask_nxv4f32: ; RV32: # %bb.0: ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4f32: ; RV64: # %bb.0: ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4f32.nxv4p0f32( %ptrs, i32 4, zeroinitializer, %passthru) ret %v @@ -1631,6 +1779,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8f32: @@ -1638,6 +1787,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8f32.nxv8p0f32( %ptrs, i32 4, %m, %passthru) ret %v @@ -1651,6 +1801,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f32: @@ -1661,6 +1812,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs %v = call @llvm.masked.gather.nxv8f32.nxv8p0f32( %ptrs, i32 4, %m, %passthru) @@ -1675,6 +1827,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f32: @@ -1685,6 +1838,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1700,6 +1854,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f32: @@ -1710,6 +1865,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1725,6 +1881,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; 
RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8f32: @@ -1735,6 +1892,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs %v = call @llvm.masked.gather.nxv8f32.nxv8p0f32( %ptrs, i32 4, %m, %passthru) @@ -1749,6 +1907,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f32: @@ -1759,6 +1918,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1774,6 +1934,7 @@ ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f32: @@ -1784,6 +1945,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1798,6 +1960,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8f32: @@ -1808,6 +1971,7 @@ ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs %v = call @llvm.masked.gather.nxv8f32.nxv8p0f32( %ptrs, i32 4, %m, %passthru) @@ -1822,6 +1986,7 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1f64: @@ -1829,6 +1994,7 @@ ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv1f64.nxv1p0f64( %ptrs, i32 8, %m, %passthru) ret %v @@ -1842,6 +2008,7 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2f64: @@ -1849,6 +2016,7 @@ ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2f64.nxv2p0f64( %ptrs, i32 8, %m, %passthru) ret %v @@ -1862,6 +2030,7 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4f64: @@ -1869,6 +2038,7 @@ ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4f64.nxv4p0f64( %ptrs, i32 8, 
%m, %passthru) ret %v @@ -1880,12 +2050,14 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8 ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1897,11 +2069,13 @@ ; RV32-LABEL: mgather_falsemask_nxv4f64: ; RV32: # %bb.0: ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_falsemask_nxv4f64: ; RV64: # %bb.0: ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4f64.nxv4p0f64( %ptrs, i32 8, zeroinitializer, %passthru) ret %v @@ -1915,6 +2089,7 @@ ; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8f64: @@ -1922,6 +2097,7 @@ ; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) ret %v @@ -1936,6 +2112,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f64: @@ -1945,6 +2122,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) @@ -1959,6 +2137,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f64: @@ -1968,6 +2147,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1983,6 +2163,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f64: @@ -1992,6 +2173,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -2008,6 +2190,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8f64: @@ -2017,6 +2200,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) @@ 
-2031,6 +2215,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f64: @@ -2040,6 +2225,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -2055,6 +2241,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f64: @@ -2064,6 +2251,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -2079,6 +2267,7 @@ ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8f64: @@ -2088,6 +2277,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) @@ -2102,6 +2292,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8f64: @@ -2111,6 +2302,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -2126,6 +2318,7 @@ ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8f64: @@ -2135,6 +2328,7 @@ ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -2149,6 +2343,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8f64: @@ -2157,6 +2352,7 @@ ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) @@ -2173,6 +2369,7 @@ ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv16i8: @@ -2190,6 +2387,7 @@ ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v11, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr 
inbounds i8, i8* %base, %idxs %v = call @llvm.masked.gather.nxv16i8.nxv16p0i8( %ptrs, i32 2, %m, %passthru) @@ -2214,6 +2412,7 @@ ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v14, (a0), v16, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv32i8: @@ -2245,6 +2444,7 @@ ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v15, (a0), v16, v0.t ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, %idxs %v = call @llvm.masked.gather.nxv32i8.nxv32p0i8( %ptrs, i32 2, %m, %passthru) diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll @@ -11,12 +11,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1i8.nxv1p0i8( %val, %ptrs, i32 1, %m) ret void @@ -29,12 +31,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2i8.nxv2p0i8( %val, %ptrs, i32 1, %m) ret void @@ -46,6 +50,7 @@ ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i16_truncstore_nxv2i8: @@ -53,6 +58,7 @@ ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.masked.scatter.nxv2i8.nxv2p0i8( %tval, %ptrs, i32 1, %m) @@ -67,6 +73,7 @@ ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i8: @@ -76,6 +83,7 @@ ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.masked.scatter.nxv2i8.nxv2p0i8( %tval, %ptrs, i32 1, %m) @@ -92,6 +100,7 @@ ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i8: @@ -103,6 +112,7 @@ ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.masked.scatter.nxv2i8.nxv2p0i8( %tval, %ptrs, i32 1, %m) @@ -116,12 +126,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), 
v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i8.nxv4p0i8( %val, %ptrs, i32 1, %m) ret void @@ -132,12 +144,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -148,10 +162,12 @@ define void @mscatter_falsemask_nxv4i8( %val, %ptrs) { ; RV32-LABEL: mscatter_falsemask_nxv4i8: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_nxv4i8: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i8.nxv4p0i8( %val, %ptrs, i32 1, zeroinitializer) ret void @@ -164,12 +180,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8i8.nxv8p0i8( %val, %ptrs, i32 1, %m) ret void @@ -182,6 +200,7 @@ ; RV32-NEXT: vsext.vf4 v12, v9 ; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8: @@ -190,6 +209,7 @@ ; RV64-NEXT: vsext.vf8 v16, v9 ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, %idxs call void @llvm.masked.scatter.nxv8i8.nxv8p0i8( %val, %ptrs, i32 1, %m) @@ -203,12 +223,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1i16.nxv1p0i16( %val, %ptrs, i32 2, %m) ret void @@ -221,12 +243,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2i16.nxv2p0i16( %val, %ptrs, i32 2, %m) ret void @@ -238,6 +262,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i16: @@ -245,6 +270,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t 
+; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.masked.scatter.nxv2i16.nxv2p0i16( %tval, %ptrs, i32 2, %m) @@ -259,6 +285,7 @@ ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV32-NEXT: vnsrl.wi v8, v11, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i16: @@ -268,6 +295,7 @@ ; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV64-NEXT: vnsrl.wi v8, v12, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.masked.scatter.nxv2i16.nxv2p0i16( %tval, %ptrs, i32 2, %m) @@ -281,12 +309,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i16.nxv4p0i16( %val, %ptrs, i32 2, %m) ret void @@ -297,12 +327,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -313,10 +345,12 @@ define void @mscatter_falsemask_nxv4i16( %val, %ptrs) { ; RV32-LABEL: mscatter_falsemask_nxv4i16: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_nxv4i16: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i16.nxv4p0i16( %val, %ptrs, i32 2, zeroinitializer) ret void @@ -329,12 +363,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, i32 2, %m) ret void @@ -348,6 +384,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8i16: @@ -357,6 +394,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs call void @llvm.masked.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, i32 2, %m) @@ -371,6 +409,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i16: @@ -380,6 +419,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = 
sext %idxs to %ptrs = getelementptr inbounds i16, i16* %base, %eidxs @@ -395,6 +435,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i16: @@ -404,6 +445,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i16, i16* %base, %eidxs @@ -419,6 +461,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i16: @@ -428,6 +471,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs call void @llvm.masked.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, i32 2, %m) @@ -441,12 +485,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1i32.nxv1p0i32( %val, %ptrs, i32 4, %m) ret void @@ -459,12 +505,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2i32.nxv2p0i32( %val, %ptrs, i32 4, %m) ret void @@ -476,6 +524,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vnsrl.wi v11, v8, 0 ; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i32: @@ -483,6 +532,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vnsrl.wi v12, v8, 0 ; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.masked.scatter.nxv2i32.nxv2p0i32( %tval, %ptrs, i32 4, %m) @@ -496,12 +546,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i32.nxv4p0i32( %val, %ptrs, i32 4, %m) ret void @@ -512,12 +564,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = 
shufflevector %mhead, undef, zeroinitializer @@ -528,10 +582,12 @@ define void @mscatter_falsemask_nxv4i32( %val, %ptrs) { ; RV32-LABEL: mscatter_falsemask_nxv4i32: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_nxv4i32: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i32.nxv4p0i32( %val, %ptrs, i32 4, zeroinitializer) ret void @@ -544,12 +600,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, i32 4, %m) ret void @@ -562,6 +620,7 @@ ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8i32: @@ -571,6 +630,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs call void @llvm.masked.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, i32 4, %m) @@ -584,6 +644,7 @@ ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i32: @@ -593,6 +654,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -607,6 +669,7 @@ ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i32: @@ -616,6 +679,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -630,6 +694,7 @@ ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8i32: @@ -639,6 +704,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs call void @llvm.masked.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, i32 4, %m) @@ -652,6 +718,7 @@ ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i32: @@ -661,6 +728,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i32, i32* 
%base, %eidxs @@ -675,6 +743,7 @@ ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i32: @@ -684,6 +753,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -697,6 +767,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsll.vi v12, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i32: @@ -706,6 +777,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs call void @llvm.masked.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, i32 4, %m) @@ -719,12 +791,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1i64.nxv1p0i64( %val, %ptrs, i32 8, %m) ret void @@ -737,12 +811,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2i64.nxv2p0i64( %val, %ptrs, i32 8, %m) ret void @@ -755,12 +831,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i64.nxv4p0i64( %val, %ptrs, i32 8, %m) ret void @@ -771,12 +849,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -787,10 +867,12 @@ define void @mscatter_falsemask_nxv4i64( %val, %ptrs) { ; RV32-LABEL: mscatter_falsemask_nxv4i64: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_nxv4i64: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i64.nxv4p0i64( %val, %ptrs, i32 8, zeroinitializer) ret void @@ -803,12 +885,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; 
RV64-LABEL: mscatter_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) ret void @@ -822,6 +906,7 @@ ; RV32-NEXT: vsll.vi v16, v20, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8i64: @@ -830,6 +915,7 @@ ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) @@ -843,6 +929,7 @@ ; RV32-NEXT: vsext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i64: @@ -851,6 +938,7 @@ ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -865,6 +953,7 @@ ; RV32-NEXT: vzext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i64: @@ -873,6 +962,7 @@ ; RV64-NEXT: vzext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -888,6 +978,7 @@ ; RV32-NEXT: vsll.vi v16, v20, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8i64: @@ -896,6 +987,7 @@ ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) @@ -909,6 +1001,7 @@ ; RV32-NEXT: vsext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64: @@ -917,6 +1010,7 @@ ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -931,6 +1025,7 @@ ; RV32-NEXT: vzext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i64: @@ -939,6 +1034,7 @@ ; RV64-NEXT: vzext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -953,6 +1049,7 @@ ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: 
.cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i32_nxv8i64: @@ -961,6 +1058,7 @@ ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) @@ -974,6 +1072,7 @@ ; RV32-NEXT: vsext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8i64: @@ -982,6 +1081,7 @@ ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -996,6 +1096,7 @@ ; RV32-NEXT: vzext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8i64: @@ -1004,6 +1105,7 @@ ; RV64-NEXT: vzext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1017,6 +1119,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i64: @@ -1024,6 +1127,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) @@ -1037,12 +1141,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1f16.nxv1p0f16( %val, %ptrs, i32 2, %m) ret void @@ -1055,12 +1161,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2f16.nxv2p0f16( %val, %ptrs, i32 2, %m) ret void @@ -1073,12 +1181,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4f16.nxv4p0f16( %val, %ptrs, i32 2, %m) ret void @@ -1089,12 +1199,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; 
RV64-LABEL: mscatter_truemask_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1105,10 +1217,12 @@ define void @mscatter_falsemask_nxv4f16( %val, %ptrs) { ; RV32-LABEL: mscatter_falsemask_nxv4f16: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_nxv4f16: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4f16.nxv4p0f16( %val, %ptrs, i32 2, zeroinitializer) ret void @@ -1121,12 +1235,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8f16.nxv8p0f16( %val, %ptrs, i32 2, %m) ret void @@ -1140,6 +1256,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8f16: @@ -1149,6 +1266,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs call void @llvm.masked.scatter.nxv8f16.nxv8p0f16( %val, %ptrs, i32 2, %m) @@ -1163,6 +1281,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f16: @@ -1172,6 +1291,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds half, half* %base, %eidxs @@ -1187,6 +1307,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f16: @@ -1196,6 +1317,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds half, half* %base, %eidxs @@ -1211,6 +1333,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8f16: @@ -1220,6 +1343,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs call void @llvm.masked.scatter.nxv8f16.nxv8p0f16( %val, %ptrs, i32 2, %m) @@ -1233,12 +1357,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: 
.cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1f32.nxv1p0f32( %val, %ptrs, i32 4, %m) ret void @@ -1251,12 +1377,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2f32.nxv2p0f32( %val, %ptrs, i32 4, %m) ret void @@ -1269,12 +1397,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4f32.nxv4p0f32( %val, %ptrs, i32 4, %m) ret void @@ -1285,12 +1415,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1301,10 +1433,12 @@ define void @mscatter_falsemask_nxv4f32( %val, %ptrs) { ; RV32-LABEL: mscatter_falsemask_nxv4f32: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_nxv4f32: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4f32.nxv4p0f32( %val, %ptrs, i32 4, zeroinitializer) ret void @@ -1317,12 +1451,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8f32.nxv8p0f32( %val, %ptrs, i32 4, %m) ret void @@ -1335,6 +1471,7 @@ ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8f32: @@ -1344,6 +1481,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs call void @llvm.masked.scatter.nxv8f32.nxv8p0f32( %val, %ptrs, i32 4, %m) @@ -1357,6 +1495,7 @@ ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f32: @@ -1366,6 +1505,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1380,6 +1520,7 @@ ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f32: @@ -1389,6 +1530,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1403,6 +1545,7 @@ ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8f32: @@ -1412,6 +1555,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs call void @llvm.masked.scatter.nxv8f32.nxv8p0f32( %val, %ptrs, i32 4, %m) @@ -1425,6 +1569,7 @@ ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f32: @@ -1434,6 +1579,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1448,6 +1594,7 @@ ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f32: @@ -1457,6 +1604,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1470,6 +1618,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsll.vi v12, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8f32: @@ -1479,6 +1628,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs call void @llvm.masked.scatter.nxv8f32.nxv8p0f32( %val, %ptrs, i32 4, %m) @@ -1492,12 +1642,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1f64.nxv1p0f64( %val, %ptrs, i32 8, %m) ret void @@ -1510,12 +1662,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2f64: ; RV64: # %bb.0: ; RV64-NEXT: 
vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2f64.nxv2p0f64( %val, %ptrs, i32 8, %m) ret void @@ -1528,12 +1682,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4f64.nxv4p0f64( %val, %ptrs, i32 8, %m) ret void @@ -1544,12 +1700,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1560,10 +1718,12 @@ define void @mscatter_falsemask_nxv4f64( %val, %ptrs) { ; RV32-LABEL: mscatter_falsemask_nxv4f64: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_falsemask_nxv4f64: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4f64.nxv4p0f64( %val, %ptrs, i32 8, zeroinitializer) ret void @@ -1576,12 +1736,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) ret void @@ -1595,6 +1757,7 @@ ; RV32-NEXT: vsll.vi v16, v20, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8f64: @@ -1603,6 +1766,7 @@ ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) @@ -1616,6 +1780,7 @@ ; RV32-NEXT: vsext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f64: @@ -1624,6 +1789,7 @@ ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1638,6 +1804,7 @@ ; RV32-NEXT: vzext.vf8 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f64: @@ -1646,6 +1813,7 @@ ; RV64-NEXT: vzext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs 
= getelementptr inbounds double, double* %base, %eidxs @@ -1661,6 +1829,7 @@ ; RV32-NEXT: vsll.vi v16, v20, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8f64: @@ -1669,6 +1838,7 @@ ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) @@ -1682,6 +1852,7 @@ ; RV32-NEXT: vsext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64: @@ -1690,6 +1861,7 @@ ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1704,6 +1876,7 @@ ; RV32-NEXT: vzext.vf4 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f64: @@ -1712,6 +1885,7 @@ ; RV64-NEXT: vzext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1726,6 +1900,7 @@ ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i32_nxv8f64: @@ -1734,6 +1909,7 @@ ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) @@ -1747,6 +1923,7 @@ ; RV32-NEXT: vsext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8f64: @@ -1755,6 +1932,7 @@ ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1769,6 +1947,7 @@ ; RV32-NEXT: vzext.vf2 v24, v16 ; RV32-NEXT: vsll.vi v16, v24, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8f64: @@ -1777,6 +1956,7 @@ ; RV64-NEXT: vzext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1790,6 +1970,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8f64: @@ -1797,6 +1978,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsll.vi 
v16, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) @@ -1821,6 +2003,7 @@ ; RV32-NEXT: vslidedown.vx v0, v0, a0 ; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v16, (zero), v28, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv16f64: @@ -1848,6 +2031,7 @@ ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %p0 = call @llvm.experimental.vector.insert.nxv8p0f64.nxv16p0f64( undef, %ptrs0, i64 0) %p1 = call @llvm.experimental.vector.insert.nxv8p0f64.nxv16p0f64( %p0, %ptrs1, i64 8) @@ -1872,6 +2056,7 @@ ; RV32-NEXT: vslidedown.vx v0, v0, a1 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv16i8_nxv16f64: @@ -1889,6 +2074,7 @@ ; RV64-NEXT: vsext.vf8 v8, v3 ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v0 = call @llvm.experimental.vector.insert.nxv8f64.nxv16f64( undef, %val0, i64 0) @@ -1912,6 +2098,7 @@ ; RV32-NEXT: vslidedown.vx v0, v0, a1 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv16i16_nxv16f64: @@ -1929,6 +2116,7 @@ ; RV64-NEXT: vsext.vf4 v8, v6 ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v0 = call @llvm.experimental.vector.insert.nxv8f64.nxv16f64( undef, %val0, i64 0) diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll --- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll @@ -58,6 +58,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv1i8: @@ -70,6 +71,7 @@ ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-256-NEXT: vmv1r.v v8, v9 +; RV32-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv1i8: @@ -82,6 +84,7 @@ ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-512-NEXT: vmv1r.v v8, v9 +; RV32-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv1i8: @@ -95,6 +98,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv1i8: @@ -107,6 +111,7 @@ ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-256-NEXT: vmv1r.v v8, v9 +; RV64-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; 
RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv1i8: @@ -119,6 +124,7 @@ ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-512-NEXT: vmv1r.v v8, v9 +; RV64-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1i8( %a) ret %res @@ -136,6 +142,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv2i8: @@ -148,6 +155,7 @@ ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-256-NEXT: vmv1r.v v8, v9 +; RV32-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv2i8: @@ -160,6 +168,7 @@ ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-512-NEXT: vmv1r.v v8, v9 +; RV32-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv2i8: @@ -173,6 +182,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv2i8: @@ -185,6 +195,7 @@ ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-256-NEXT: vmv1r.v v8, v9 +; RV64-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv2i8: @@ -197,6 +208,7 @@ ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-512-NEXT: vmv1r.v v8, v9 +; RV64-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2i8( %a) ret %res @@ -214,6 +226,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv4i8: @@ -226,6 +239,7 @@ ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-256-NEXT: vmv1r.v v8, v9 +; RV32-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv4i8: @@ -238,6 +252,7 @@ ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-512-NEXT: vmv1r.v v8, v9 +; RV32-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv4i8: @@ -251,6 +266,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv4i8: @@ -263,6 +279,7 @@ ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-256-NEXT: vmv1r.v v8, v9 +; RV64-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv4i8: @@ -275,6 +292,7 @@ ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-512-NEXT: vmv1r.v v8, v9 +; RV64-BITS-512-NEXT: 
.cfi_def_cfa_offset 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i8( %a) ret %res @@ -291,6 +309,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv8i8: @@ -302,6 +321,7 @@ ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-256-NEXT: vmv1r.v v8, v9 +; RV32-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv8i8: @@ -313,6 +333,7 @@ ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-512-NEXT: vmv1r.v v8, v9 +; RV32-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i8: @@ -325,6 +346,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv8i8: @@ -336,6 +358,7 @@ ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-256-NEXT: vmv1r.v v8, v9 +; RV64-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv8i8: @@ -347,6 +370,7 @@ ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-512-NEXT: vmv1r.v v8, v9 +; RV64-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i8( %a) ret %res @@ -364,6 +388,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-BITS-UNKNOWN-NEXT: vmv2r.v v8, v10 +; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv16i8: @@ -376,6 +401,7 @@ ; RV32-BITS-256-NEXT: vrsub.vx v12, v10, a0 ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v12 ; RV32-BITS-256-NEXT: vmv2r.v v8, v10 +; RV32-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv16i8: @@ -388,6 +414,7 @@ ; RV32-BITS-512-NEXT: vrsub.vx v12, v10, a0 ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v12 ; RV32-BITS-512-NEXT: vmv2r.v v8, v10 +; RV32-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv16i8: @@ -401,6 +428,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 ; RV64-BITS-UNKNOWN-NEXT: vmv2r.v v8, v10 +; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv16i8: @@ -413,6 +441,7 @@ ; RV64-BITS-256-NEXT: vrsub.vx v12, v10, a0 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v12 ; RV64-BITS-256-NEXT: vmv2r.v v8, v10 +; RV64-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv16i8: @@ -425,6 +454,7 @@ ; RV64-BITS-512-NEXT: vrsub.vx v12, v10, a0 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v12 ; RV64-BITS-512-NEXT: vmv2r.v v8, v10 +; RV64-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16i8( %a) ret %res @@ -442,6 +472,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu ; RV32-BITS-UNKNOWN-NEXT: 
vrgatherei16.vv v12, v8, v16 ; RV32-BITS-UNKNOWN-NEXT: vmv4r.v v8, v12 +; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv32i8: @@ -454,6 +485,7 @@ ; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-256-NEXT: vrgather.vv v12, v8, v16 ; RV32-BITS-256-NEXT: vmv4r.v v8, v12 +; RV32-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv32i8: @@ -466,6 +498,7 @@ ; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-512-NEXT: vrgather.vv v12, v8, v16 ; RV32-BITS-512-NEXT: vmv4r.v v8, v12 +; RV32-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i8: @@ -479,6 +512,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 ; RV64-BITS-UNKNOWN-NEXT: vmv4r.v v8, v12 +; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv32i8: @@ -491,6 +525,7 @@ ; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-256-NEXT: vrgather.vv v12, v8, v16 ; RV64-BITS-256-NEXT: vmv4r.v v8, v12 +; RV64-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv32i8: @@ -503,6 +538,7 @@ ; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v16 ; RV64-BITS-512-NEXT: vmv4r.v v8, v12 +; RV64-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv32i8( %a) ret %res @@ -521,6 +557,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v20, v8, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v16, v12, v24 ; RV32-BITS-UNKNOWN-NEXT: vmv8r.v v8, v16 +; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv64i8: @@ -533,6 +570,7 @@ ; RV32-BITS-256-NEXT: vrsub.vx v24, v16, a0 ; RV32-BITS-256-NEXT: vrgather.vv v16, v8, v24 ; RV32-BITS-256-NEXT: vmv8r.v v8, v16 +; RV32-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv64i8: @@ -546,6 +584,7 @@ ; RV32-BITS-512-NEXT: vrgather.vv v20, v8, v24 ; RV32-BITS-512-NEXT: vrgather.vv v16, v12, v24 ; RV32-BITS-512-NEXT: vmv8r.v v8, v16 +; RV32-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i8: @@ -560,6 +599,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v20, v8, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v16, v12, v24 ; RV64-BITS-UNKNOWN-NEXT: vmv8r.v v8, v16 +; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv64i8: @@ -572,6 +612,7 @@ ; RV64-BITS-256-NEXT: vrsub.vx v24, v16, a0 ; RV64-BITS-256-NEXT: vrgather.vv v16, v8, v24 ; RV64-BITS-256-NEXT: vmv8r.v v8, v16 +; RV64-BITS-256-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv64i8: @@ -585,6 +626,7 @@ ; RV64-BITS-512-NEXT: vrgather.vv v20, v8, v24 ; RV64-BITS-512-NEXT: vrgather.vv v16, v12, v24 ; RV64-BITS-512-NEXT: vmv8r.v v8, v16 +; RV64-BITS-512-NEXT: .cfi_def_cfa_offset 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv64i8( %a) ret %res @@ -601,6 +643,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1i16( %a) ret %res @@ -617,6 +660,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; 
CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2i16( %a) ret %res @@ -633,6 +677,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i16( %a) ret %res @@ -648,6 +693,7 @@ ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i16( %a) ret %res @@ -664,6 +710,7 @@ ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16i16( %a) ret %res @@ -680,6 +727,7 @@ ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv32i16( %a) ret %res @@ -696,6 +744,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1i32( %a) ret %res @@ -712,6 +761,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2i32( %a) ret %res @@ -728,6 +778,7 @@ ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i32( %a) ret %res @@ -743,6 +794,7 @@ ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i32( %a) ret %res @@ -759,6 +811,7 @@ ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16i32( %a) ret %res @@ -775,6 +828,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1i64( %a) ret %res @@ -791,6 +845,7 @@ ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2i64( %a) ret %res @@ -807,6 +862,7 @@ ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i64( %a) ret %res @@ -822,6 +878,7 @@ ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i64( %a) ret %res @@ -842,6 +899,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1f16( %a) ret %res @@ -858,6 +916,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2f16( %a) ret %res @@ -874,6 +933,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4f16( %a) ret %res @@ -889,6 +949,7 @@ ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8f16( %a) ret %res @@ -905,6 +966,7 @@ ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16f16( %a) ret %res @@ -921,6 +983,7 @@ ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv32f16( %a) ret %res @@ -937,6 +1000,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1f32( %a) ret %res @@ -953,6 +1017,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2f32( %a) ret %res @@ -969,6 +1034,7 @@ ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4f32( %a) ret %res @@ -984,6 +1050,7 @@ ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8f32( %a) ret %res @@ -1000,6 +1067,7 @@ ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16f32( %a) ret %res @@ -1016,6 +1084,7 @@ ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1f64( %a) ret %res @@ -1032,6 +1101,7 @@ ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2f64( %a) ret %res @@ -1048,6 +1118,7 @@ ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4f64( %a) ret %res @@ -1063,6 +1134,7 @@ ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %res = call 
@llvm.experimental.vector.reverse.nxv8f64( %a)
ret %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll
--- a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll
@@ -12,6 +12,7 @@
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsseg2e16.v v4, (a0), v0.t
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
@@ -91,6 +91,7 @@
; RV64: # %bb.0:
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: srli a0, a0, 3
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; RV32-LABEL: vscale_select:
@@ -98,6 +99,7 @@
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: srli a0, a0, 3
; RV32-NEXT: mv a1, zero
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
%a = call i64 @llvm.vscale.i64()
%b = and i64 %a, 4294967295
diff --git a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll
@@ -11,6 +11,7 @@
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: vmsne.vv v0, v8, v10
; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%a = call { , } @llvm.sadd.with.overflow.nxv2i32( %x, %y)
%b = extractvalue { , } %a, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/select-fp.ll b/llvm/test/CodeGen/RISCV/rvv/select-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/select-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/select-fp.ll
@@ -12,6 +12,7 @@
; CHECK-NEXT: vmsne.vi v0, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = select i1 %c, %a, %b
ret %v
@@ -26,6 +27,7 @@
; CHECK-NEXT: vmsne.vi v0, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%cmp = fcmp oeq half %a, %b
%v = select i1 %cmp, %c, %d
@@ -40,6 +42,7 @@
; CHECK-NEXT: vmsne.vi v0, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = select i1 %c, %a, %b
ret %v
@@ -54,6 +57,7 @@
; CHECK-NEXT: vmsne.vi v0, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%cmp = fcmp oeq half %a, %b
%v = select i1 %cmp, %c, %d
@@ -68,6 +72,7 @@
; CHECK-NEXT: vmsne.vi v0, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = select i1 %c, %a, %b
ret %v
@@ -82,6 +87,7 @@
; CHECK-NEXT: vmsne.vi v0, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%cmp = fcmp oeq half %a, %b
%v = select i1 %cmp, %c, %d
@@ -96,6 +102,7 @@
; CHECK-NEXT: vmsne.vi v0, v12, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = select i1 %c, %a, %b
ret %v
@@ -110,6 +117,7 @@
; CHECK-NEXT:
vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b %v = select i1 %cmp, %c, %d @@ -124,6 +132,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -138,6 +147,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b %v = select i1 %cmp, %c, %d @@ -152,6 +162,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -166,6 +177,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b %v = select i1 %cmp, %c, %d @@ -180,6 +192,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -194,6 +207,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b %v = select i1 %cmp, %c, %d @@ -208,6 +222,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -222,6 +237,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b %v = select i1 %cmp, %c, %d @@ -236,6 +252,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -250,6 +267,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b %v = select i1 %cmp, %c, %d @@ -264,6 +282,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -278,6 +297,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b %v = select i1 %cmp, %c, %d @@ -292,6 +312,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -306,6 +327,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b %v = select i1 %cmp, %c, %d @@ -320,6 +342,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -334,6 +357,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b %v = select i1 %cmp, %c, %d @@ -348,6 +372,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -362,6 +387,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b %v = select i1 %cmp, %c, %d @@ -376,6 +402,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -390,6 +417,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b %v = select i1 %cmp, %c, %d @@ -404,6 +432,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -418,6 +447,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b %v = select i1 %cmp, %c, %d diff --git a/llvm/test/CodeGen/RISCV/rvv/select-int.ll b/llvm/test/CodeGen/RISCV/rvv/select-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/select-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/select-int.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -29,6 +30,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = select i1 %cmp, %c, %d @@ -44,6 +46,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -60,6 +63,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = select i1 %cmp, %c, %d @@ -75,6 +79,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -91,6 +96,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = 
select i1 %cmp, %c, %d @@ -106,6 +112,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -122,6 +129,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = select i1 %cmp, %c, %d @@ -137,6 +145,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -153,6 +162,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = select i1 %cmp, %c, %d @@ -168,6 +178,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -184,6 +195,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = select i1 %cmp, %c, %d @@ -199,6 +211,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -215,6 +228,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i1 %a, %b %v = select i1 %cmp, %c, %d @@ -228,6 +242,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -242,6 +257,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i8 %a, %b %v = select i1 %cmp, %c, %d @@ -255,6 +271,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -269,6 +286,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i8 %a, %b %v = select i1 %cmp, %c, %d @@ -282,6 +300,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -296,6 +315,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i8 %a, %b %v = select i1 %cmp, %c, %d @@ -309,6 +329,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -323,6 +344,7 @@ ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i8 %a, %b %v = 
select i1 %cmp, %c, %d @@ -336,6 +358,7 @@ ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -350,6 +373,7 @@ ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i8 %a, %b %v = select i1 %cmp, %c, %d @@ -363,6 +387,7 @@ ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -377,6 +402,7 @@ ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i8 %a, %b %v = select i1 %cmp, %c, %d @@ -390,6 +416,7 @@ ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -404,6 +431,7 @@ ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i8 %a, %b %v = select i1 %cmp, %c, %d @@ -418,6 +446,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -433,6 +462,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b %v = select i1 %cmp, %c, %d @@ -447,6 +477,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -462,6 +493,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b %v = select i1 %cmp, %c, %d @@ -476,6 +508,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -491,6 +524,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b %v = select i1 %cmp, %c, %d @@ -505,6 +539,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -520,6 +555,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b %v = select i1 %cmp, %c, %d @@ -534,6 +570,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -549,6 
+586,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b %v = select i1 %cmp, %c, %d @@ -563,6 +601,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -578,6 +617,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b %v = select i1 %cmp, %c, %d @@ -592,6 +632,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -607,6 +648,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b %v = select i1 %cmp, %c, %d @@ -621,6 +663,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -636,6 +679,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b %v = select i1 %cmp, %c, %d @@ -650,6 +694,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -665,6 +710,7 @@ ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b %v = select i1 %cmp, %c, %d @@ -679,6 +725,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -694,6 +741,7 @@ ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b %v = select i1 %cmp, %c, %d @@ -708,6 +756,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -723,6 +772,7 @@ ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b %v = select i1 %cmp, %c, %d @@ -737,6 +787,7 @@ ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b ret %v @@ -754,6 +805,7 @@ ; RV32-NEXT: vmsne.vi v0, v10, 0 ; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV32-NEXT: vmerge.vvm v8, v9, v8, v0 +; RV32-NEXT: 
.cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: selectcc_nxv1i64:
@@ -765,6 +817,7 @@
; RV64-NEXT: vmsne.vi v0, v10, 0
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vmerge.vvm v8, v9, v8, v0
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%cmp = icmp ne i64 %a, %b
%v = select i1 %cmp, %c, %d
@@ -779,6 +832,7 @@
; CHECK-NEXT: vmsne.vi v0, v12, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = select i1 %c, %a, %b
ret %v
@@ -796,6 +850,7 @@
; RV32-NEXT: vmsne.vi v0, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; RV32-NEXT: vmerge.vvm v8, v10, v8, v0
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: selectcc_nxv2i64:
@@ -807,6 +862,7 @@
; RV64-NEXT: vmsne.vi v0, v12, 0
; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; RV64-NEXT: vmerge.vvm v8, v10, v8, v0
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%cmp = icmp ne i64 %a, %b
%v = select i1 %cmp, %c, %d
@@ -821,6 +877,7 @@
; CHECK-NEXT: vmsne.vi v0, v16, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = select i1 %c, %a, %b
ret %v
@@ -838,6 +895,7 @@
; RV32-NEXT: vmsne.vi v0, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vmerge.vvm v8, v12, v8, v0
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: selectcc_nxv4i64:
@@ -849,6 +907,7 @@
; RV64-NEXT: vmsne.vi v0, v16, 0
; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT: vmerge.vvm v8, v12, v8, v0
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%cmp = icmp ne i64 %a, %b
%v = select i1 %cmp, %c, %d
@@ -863,6 +922,7 @@
; CHECK-NEXT: vmsne.vi v0, v24, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = select i1 %c, %a, %b
ret %v
@@ -880,6 +940,7 @@
; RV32-NEXT: vmsne.vi v0, v24, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmerge.vvm v8, v16, v8, v0
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: selectcc_nxv8i64:
@@ -891,6 +952,7 @@
; RV64-NEXT: vmsne.vi v0, v24, 0
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV64-NEXT: vmerge.vvm v8, v16, v8, v0
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%cmp = icmp ne i64 %a, %b
%v = select i1 %cmp, %c, %d
diff --git a/llvm/test/CodeGen/RISCV/rvv/select-sra.ll b/llvm/test/CodeGen/RISCV/rvv/select-sra.ll
--- a/llvm/test/CodeGen/RISCV/rvv/select-sra.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/select-sra.ll
@@ -15,6 +15,7 @@
; RV32-NEXT: lui a0, 214376
; RV32-NEXT: addi a0, a0, -2030
; RV32-NEXT: vmerge.vxm v8, v8, a0, v0
+; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vselect_of_consts:
@@ -26,6 +27,7 @@
; RV64-NEXT: lui a0, 214376
; RV64-NEXT: addiw a0, a0, -2030
; RV64-NEXT: vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%v = select <4 x i1> %cc, <4 x i32> , <4 x i32>
ret <4 x i32> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll
@@ -10,6 +10,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfeq.vv v0, v8, v10
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%vc = fcmp oeq %va, %vb
ret %vc
@@ -20,6 +21,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT:
vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -32,6 +34,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -44,6 +47,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb ret %vc @@ -54,6 +58,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -66,6 +71,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -76,6 +82,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -88,6 +95,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -100,6 +108,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -110,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -122,6 +132,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -132,6 +143,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -144,6 +156,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -156,6 +169,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -166,6 +180,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -178,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v10 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -188,6 +204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -200,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -212,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -222,6 +241,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -234,6 +254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -244,6 +265,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -256,6 +278,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -268,6 +291,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -278,6 +302,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -292,6 +317,7 @@ ; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vmflt.vv v13, v10, v8 ; CHECK-NEXT: vmor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -304,6 +330,7 @@ ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmfgt.vf v11, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v11, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -318,6 +345,7 @@ ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmflt.vf v11, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v11, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -330,6 +358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -340,6 +369,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 
%splat = shufflevector %head, undef, zeroinitializer @@ -354,6 +384,7 @@ ; CHECK-NEXT: vmfeq.vv v12, v10, v10 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -367,6 +398,7 @@ ; CHECK-NEXT: vmfeq.vf v12, v10, fa0 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -382,6 +414,7 @@ ; CHECK-NEXT: vmfeq.vf v12, v10, fa0 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -396,6 +429,7 @@ ; CHECK-NEXT: vmfeq.vv v12, v10, v10 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -409,6 +443,7 @@ ; CHECK-NEXT: vmfeq.vf v12, v10, fa0 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -423,6 +458,7 @@ ; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vmflt.vv v13, v10, v8 ; CHECK-NEXT: vmnor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -435,6 +471,7 @@ ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmfgt.vf v11, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v11, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -449,6 +486,7 @@ ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmflt.vf v11, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v11, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -461,6 +499,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -471,6 +510,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -484,6 +524,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v12, v8, v10 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb ret %vc @@ -495,6 +536,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -508,6 +550,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -520,6 +563,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %vc = fcmp ugt %va, %vb ret %vc @@ -530,6 +574,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -543,6 +588,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -554,6 +600,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -567,6 +614,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -579,6 +627,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -589,6 +638,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -602,6 +652,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v12, v10, v8 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -613,6 +664,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -626,6 +678,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -638,6 +691,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -648,6 +702,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -661,6 +716,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v12, v10, v8 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -672,6 +728,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -685,6 +742,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: 
vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -697,6 +755,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -707,6 +766,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -719,6 +779,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -729,6 +790,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -741,6 +803,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -753,6 +816,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -763,6 +827,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -777,6 +842,7 @@ ; CHECK-NEXT: vmfne.vv v12, v10, v10 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -790,6 +856,7 @@ ; CHECK-NEXT: vmfne.vf v12, v10, fa0 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -805,6 +872,7 @@ ; CHECK-NEXT: vmfne.vf v12, v10, fa0 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -819,6 +887,7 @@ ; CHECK-NEXT: vmfne.vv v12, v10, v10 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -832,6 +901,7 @@ ; CHECK-NEXT: vmfne.vf v12, v10, fa0 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -844,6 +914,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb ret %vc @@ -854,6 +925,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -866,6 +938,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -878,6 +951,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb ret %vc @@ -888,6 +962,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -900,6 +975,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -910,6 +986,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -922,6 +999,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -934,6 +1012,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -944,6 +1023,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -956,6 +1036,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -966,6 +1047,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -978,6 +1060,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -990,6 +1073,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -1000,6 +1084,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1012,6 +1097,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -1022,6 
+1108,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1034,6 +1121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1046,6 +1134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -1056,6 +1145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1068,6 +1158,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -1078,6 +1169,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1090,6 +1182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1102,6 +1195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -1112,6 +1206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1126,6 +1221,7 @@ ; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmflt.vv v17, v12, v8 ; CHECK-NEXT: vmor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -1138,6 +1234,7 @@ ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmfgt.vf v13, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1152,6 +1249,7 @@ ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmflt.vf v13, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1164,6 +1262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -1174,6 +1273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1188,6 
+1288,7 @@ ; CHECK-NEXT: vmfeq.vv v16, v12, v12 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -1201,6 +1302,7 @@ ; CHECK-NEXT: vmfeq.vf v16, v12, fa0 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1216,6 +1318,7 @@ ; CHECK-NEXT: vmfeq.vf v16, v12, fa0 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1230,6 +1333,7 @@ ; CHECK-NEXT: vmfeq.vv v16, v12, v12 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -1243,6 +1347,7 @@ ; CHECK-NEXT: vmfeq.vf v16, v12, fa0 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1257,6 +1362,7 @@ ; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmflt.vv v17, v12, v8 ; CHECK-NEXT: vmnor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -1269,6 +1375,7 @@ ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmfgt.vf v13, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1283,6 +1390,7 @@ ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmflt.vf v13, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1295,6 +1403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -1305,6 +1414,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1318,6 +1428,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v16, v8, v12 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb ret %vc @@ -1329,6 +1440,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1342,6 +1454,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1354,6 +1467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp 
ugt %va, %vb ret %vc @@ -1364,6 +1478,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1377,6 +1492,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -1388,6 +1504,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1401,6 +1518,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1413,6 +1531,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -1423,6 +1542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1436,6 +1556,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v16, v12, v8 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -1447,6 +1568,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1460,6 +1582,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1472,6 +1595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -1482,6 +1606,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1495,6 +1620,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v16, v12, v8 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -1506,6 +1632,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1519,6 +1646,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; 
CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1531,6 +1659,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -1541,6 +1670,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1553,6 +1683,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -1563,6 +1694,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1575,6 +1707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1587,6 +1720,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -1597,6 +1731,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1611,6 +1746,7 @@ ; CHECK-NEXT: vmfne.vv v16, v12, v12 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -1624,6 +1760,7 @@ ; CHECK-NEXT: vmfne.vf v16, v12, fa0 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1639,6 +1776,7 @@ ; CHECK-NEXT: vmfne.vf v16, v12, fa0 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1653,6 +1791,7 @@ ; CHECK-NEXT: vmfne.vv v16, v12, v12 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -1666,6 +1805,7 @@ ; CHECK-NEXT: vmfne.vf v16, v12, fa0 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1678,6 +1818,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb ret %vc @@ -1688,6 +1829,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1700,6 +1842,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1712,6 +1855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb ret %vc @@ -1722,6 +1866,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1734,6 +1879,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -1744,6 +1890,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1756,6 +1903,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1768,6 +1916,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -1778,6 +1927,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1790,6 +1940,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -1800,6 +1951,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1812,6 +1964,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1824,6 +1977,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -1834,6 +1988,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1846,6 +2001,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -1856,6 +2012,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1868,6 +2025,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1880,6 +2038,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -1890,6 +2049,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1902,6 +2062,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -1912,6 +2073,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1924,6 +2086,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1936,6 +2099,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -1946,6 +2110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1960,6 +2125,7 @@ ; CHECK-NEXT: vmflt.vv v24, v8, v16 ; CHECK-NEXT: vmflt.vv v25, v16, v8 ; CHECK-NEXT: vmor.mm v0, v25, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -1972,6 +2138,7 @@ ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmfgt.vf v17, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1986,6 +2153,7 @@ ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmflt.vf v17, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1998,6 +2166,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -2008,6 +2177,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2022,6 +2192,7 @@ ; CHECK-NEXT: vmfeq.vv v24, v16, v16 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -2035,6 +2206,7 @@ ; CHECK-NEXT: vmfeq.vf v24, v16, fa0 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2050,6 +2222,7 @@ ; CHECK-NEXT: vmfeq.vf v24, v16, fa0 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2064,6 +2237,7 @@ ; CHECK-NEXT: vmfeq.vv v24, v16, v16 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -2077,6 +2251,7 @@ ; CHECK-NEXT: vmfeq.vf v24, v16, fa0 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2091,6 +2266,7 @@ ; CHECK-NEXT: vmflt.vv v24, v8, v16 ; CHECK-NEXT: vmflt.vv v25, v16, v8 ; CHECK-NEXT: vmnor.mm v0, v25, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -2103,6 +2279,7 @@ ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmfgt.vf v17, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2117,6 +2294,7 @@ ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmflt.vf v17, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2129,6 +2307,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -2139,6 +2318,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2152,6 +2332,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v24, v8, v16 ; CHECK-NEXT: vmnand.mm v0, v24, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb ret %vc @@ -2163,6 +2344,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2176,6 +2358,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2188,6 +2371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, 
ta, mu ; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb ret %vc @@ -2198,6 +2382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2211,6 +2396,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v24, v8, v16 ; CHECK-NEXT: vmnand.mm v0, v24, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -2222,6 +2408,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2235,6 +2422,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2247,6 +2435,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -2257,6 +2446,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2270,6 +2460,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v24, v16, v8 ; CHECK-NEXT: vmnand.mm v0, v24, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -2281,6 +2472,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2294,6 +2486,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2306,6 +2499,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -2316,6 +2510,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2329,6 +2524,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v24, v16, v8 ; CHECK-NEXT: vmnand.mm v0, v24, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -2340,6 +2536,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer 
@@ -2353,6 +2550,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2365,6 +2563,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -2375,6 +2574,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2387,6 +2587,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -2397,6 +2598,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2409,6 +2611,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2421,6 +2624,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -2431,6 +2635,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2445,6 +2650,7 @@ ; CHECK-NEXT: vmfne.vv v24, v16, v16 ; CHECK-NEXT: vmfne.vv v16, v8, v8 ; CHECK-NEXT: vmor.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -2458,6 +2664,7 @@ ; CHECK-NEXT: vmfne.vf v24, v16, fa0 ; CHECK-NEXT: vmfne.vv v16, v8, v8 ; CHECK-NEXT: vmor.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2473,6 +2680,7 @@ ; CHECK-NEXT: vmfne.vf v24, v16, fa0 ; CHECK-NEXT: vmfne.vv v16, v8, v8 ; CHECK-NEXT: vmor.mm v0, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2487,6 +2695,7 @@ ; CHECK-NEXT: vmfne.vv v24, v16, v16 ; CHECK-NEXT: vmfne.vv v16, v8, v8 ; CHECK-NEXT: vmor.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -2500,6 +2709,7 @@ ; CHECK-NEXT: vmfne.vf v24, v16, fa0 ; CHECK-NEXT: vmfne.vv v16, v8, v8 ; CHECK-NEXT: vmor.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2521,6 +2731,7 @@ ; CHECK-NEXT: add a1, a0, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v0, v24, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, zeroinitializer ret %vc 
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll @@ -10,6 +10,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb ret %vc @@ -20,6 +21,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -32,6 +34,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -44,6 +47,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb ret %vc @@ -54,6 +58,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -66,6 +71,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -76,6 +82,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -88,6 +95,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -100,6 +108,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -110,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -122,6 +132,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -132,6 +143,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -144,6 +156,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -156,6 +169,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -166,6 +180,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -178,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -188,6 +204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -200,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -212,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -222,6 +241,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -234,6 +254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -244,6 +265,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -256,6 +278,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -268,6 +291,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -278,6 +302,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -292,6 +317,7 @@ ; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vmflt.vv v13, v10, v8 ; CHECK-NEXT: vmor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -304,6 +330,7 @@ ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmfgt.vf v11, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v11, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -318,6 +345,7 @@ ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmflt.vf v11, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v11, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = 
shufflevector %head, undef, zeroinitializer @@ -330,6 +358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -340,6 +369,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -354,6 +384,7 @@ ; CHECK-NEXT: vmfeq.vv v12, v10, v10 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -367,6 +398,7 @@ ; CHECK-NEXT: vmfeq.vf v12, v10, fa0 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -382,6 +414,7 @@ ; CHECK-NEXT: vmfeq.vf v12, v10, fa0 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -396,6 +429,7 @@ ; CHECK-NEXT: vmfeq.vv v12, v10, v10 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -409,6 +443,7 @@ ; CHECK-NEXT: vmfeq.vf v12, v10, fa0 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -423,6 +458,7 @@ ; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vmflt.vv v13, v10, v8 ; CHECK-NEXT: vmnor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -435,6 +471,7 @@ ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmfgt.vf v11, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v11, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -449,6 +486,7 @@ ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmflt.vf v11, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v11, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -461,6 +499,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -471,6 +510,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -484,6 +524,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v12, v8, v10 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb ret %vc @@ -495,6 +536,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, 
undef, zeroinitializer @@ -508,6 +550,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -520,6 +563,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb ret %vc @@ -530,6 +574,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -543,6 +588,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -554,6 +600,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -567,6 +614,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -579,6 +627,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -589,6 +638,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -602,6 +652,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v12, v10, v8 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -613,6 +664,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -626,6 +678,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -638,6 +691,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -648,6 +702,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -661,6 +716,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vv v12, v10, v8 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -672,6 +728,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -685,6 +742,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v10, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -697,6 +755,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -707,6 +766,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -719,6 +779,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -729,6 +790,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -741,6 +803,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -753,6 +816,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -763,6 +827,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -777,6 +842,7 @@ ; CHECK-NEXT: vmfne.vv v12, v10, v10 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -790,6 +856,7 @@ ; CHECK-NEXT: vmfne.vf v12, v10, fa0 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -805,6 +872,7 @@ ; CHECK-NEXT: vmfne.vf v12, v10, fa0 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -819,6 +887,7 @@ ; CHECK-NEXT: vmfne.vv v12, v10, v10 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -832,6 +901,7 @@ ; CHECK-NEXT: vmfne.vf v12, v10, fa0 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -844,6 +914,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb ret %vc @@ -854,6 +925,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -866,6 +938,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -878,6 +951,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb ret %vc @@ -888,6 +962,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -900,6 +975,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -910,6 +986,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -922,6 +999,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -934,6 +1012,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -944,6 +1023,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -956,6 +1036,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -966,6 +1047,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -978,6 +1060,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -990,6 +1073,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -1000,6 +1084,7 
@@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1012,6 +1097,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -1022,6 +1108,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1034,6 +1121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1046,6 +1134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -1056,6 +1145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1068,6 +1158,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -1078,6 +1169,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1090,6 +1182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1102,6 +1195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -1112,6 +1206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1126,6 +1221,7 @@ ; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmflt.vv v17, v12, v8 ; CHECK-NEXT: vmor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -1138,6 +1234,7 @@ ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmfgt.vf v13, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1152,6 +1249,7 @@ ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmflt.vf v13, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1164,6 +1262,7 @@ 
; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -1174,6 +1273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1188,6 +1288,7 @@ ; CHECK-NEXT: vmfeq.vv v16, v12, v12 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -1201,6 +1302,7 @@ ; CHECK-NEXT: vmfeq.vf v16, v12, fa0 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1216,6 +1318,7 @@ ; CHECK-NEXT: vmfeq.vf v16, v12, fa0 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1230,6 +1333,7 @@ ; CHECK-NEXT: vmfeq.vv v16, v12, v12 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -1243,6 +1347,7 @@ ; CHECK-NEXT: vmfeq.vf v16, v12, fa0 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1257,6 +1362,7 @@ ; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmflt.vv v17, v12, v8 ; CHECK-NEXT: vmnor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -1269,6 +1375,7 @@ ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmfgt.vf v13, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1283,6 +1390,7 @@ ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmflt.vf v13, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v13, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1295,6 +1403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -1305,6 +1414,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1318,6 +1428,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v16, v8, v12 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb ret %vc @@ -1329,6 +1440,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1342,6 
+1454,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1354,6 +1467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb ret %vc @@ -1364,6 +1478,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1377,6 +1492,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -1388,6 +1504,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1401,6 +1518,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1413,6 +1531,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -1423,6 +1542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1436,6 +1556,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v16, v12, v8 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -1447,6 +1568,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1460,6 +1582,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1472,6 +1595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -1482,6 +1606,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1495,6 +1620,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vv v16, v12, v8 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -1506,6 +1632,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1519,6 +1646,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v12, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1531,6 +1659,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -1541,6 +1670,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1553,6 +1683,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -1563,6 +1694,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1575,6 +1707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1587,6 +1720,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -1597,6 +1731,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1611,6 +1746,7 @@ ; CHECK-NEXT: vmfne.vv v16, v12, v12 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -1624,6 +1760,7 @@ ; CHECK-NEXT: vmfne.vf v16, v12, fa0 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1639,6 +1776,7 @@ ; CHECK-NEXT: vmfne.vf v16, v12, fa0 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1653,6 +1791,7 @@ ; CHECK-NEXT: vmfne.vv v16, v12, v12 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -1666,6 +1805,7 @@ ; CHECK-NEXT: vmfne.vf v16, v12, fa0 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v16 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1678,6 +1818,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb ret %vc @@ -1688,6 +1829,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1700,6 +1842,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1712,6 +1855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb ret %vc @@ -1722,6 +1866,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1734,6 +1879,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -1744,6 +1890,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1756,6 +1903,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1768,6 +1916,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb ret %vc @@ -1778,6 +1927,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1790,6 +1940,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -1800,6 +1951,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1812,6 +1964,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1824,6 +1977,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb ret %vc @@ -1834,6 +1988,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1846,6 +2001,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -1856,6 +2012,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1868,6 +2025,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1880,6 +2038,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb ret %vc @@ -1890,6 +2049,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1902,6 +2062,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -1912,6 +2073,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1924,6 +2086,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1936,6 +2099,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb ret %vc @@ -1946,6 +2110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1960,6 +2125,7 @@ ; CHECK-NEXT: vmflt.vv v24, v8, v16 ; CHECK-NEXT: vmflt.vv v25, v16, v8 ; CHECK-NEXT: vmor.mm v0, v25, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -1972,6 +2138,7 @@ ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmfgt.vf v17, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1986,6 +2153,7 @@ ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmflt.vf v17, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1998,6 +2166,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb ret %vc @@ -2008,6 +2177,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2022,6 +2192,7 @@ ; CHECK-NEXT: vmfeq.vv v24, v16, v16 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -2035,6 +2206,7 @@ ; CHECK-NEXT: vmfeq.vf v24, v16, fa0 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2050,6 +2222,7 @@ ; CHECK-NEXT: vmfeq.vf v24, v16, fa0 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2064,6 +2237,7 @@ ; CHECK-NEXT: vmfeq.vv v24, v16, v16 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb ret %vc @@ -2077,6 +2251,7 @@ ; CHECK-NEXT: vmfeq.vf v24, v16, fa0 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2091,6 +2266,7 @@ ; CHECK-NEXT: vmflt.vv v24, v8, v16 ; CHECK-NEXT: vmflt.vv v25, v16, v8 ; CHECK-NEXT: vmnor.mm v0, v25, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -2103,6 +2279,7 @@ ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmfgt.vf v17, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2117,6 +2294,7 @@ ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmflt.vf v17, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v17, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2129,6 +2307,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb ret %vc @@ -2139,6 +2318,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2152,6 +2332,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v24, v8, v16 ; CHECK-NEXT: vmnand.mm v0, v24, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb ret %vc @@ -2163,6 +2344,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2176,6 +2358,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2188,6 +2371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb ret %vc @@ -2198,6 +2382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2211,6 +2396,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v24, v8, v16 ; CHECK-NEXT: vmnand.mm v0, v24, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -2222,6 +2408,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2235,6 +2422,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2247,6 +2435,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb ret %vc @@ -2257,6 +2446,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2270,6 +2460,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v24, v16, v8 ; CHECK-NEXT: vmnand.mm v0, v24, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -2281,6 +2472,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfge.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2294,6 +2486,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2306,6 +2499,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb ret %vc @@ -2316,6 +2510,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2329,6 +2524,7 @@ ; 
CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vv v24, v16, v8 ; CHECK-NEXT: vmnand.mm v0, v24, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -2340,6 +2536,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2353,6 +2550,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmnand.mm v0, v16, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2365,6 +2563,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb ret %vc @@ -2375,6 +2574,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2387,6 +2587,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -2397,6 +2598,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2409,6 +2611,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2421,6 +2624,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb ret %vc @@ -2431,6 +2635,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmfne.vf v0, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2445,6 +2650,7 @@ ; CHECK-NEXT: vmfne.vv v24, v16, v16 ; CHECK-NEXT: vmfne.vv v16, v8, v8 ; CHECK-NEXT: vmor.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -2458,6 +2664,7 @@ ; CHECK-NEXT: vmfne.vf v24, v16, fa0 ; CHECK-NEXT: vmfne.vv v16, v8, v8 ; CHECK-NEXT: vmor.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2473,6 +2680,7 @@ ; CHECK-NEXT: vmfne.vf v24, v16, fa0 ; CHECK-NEXT: vmfne.vv v16, v8, v8 ; CHECK-NEXT: vmor.mm v0, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2487,6 +2695,7 @@ ; CHECK-NEXT: vmfne.vv v24, v16, v16 ; CHECK-NEXT: vmfne.vv v16, v8, v8 ; CHECK-NEXT: vmor.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb ret %vc @@ -2500,6 +2709,7 
@@ ; CHECK-NEXT: vmfne.vf v24, v16, fa0 ; CHECK-NEXT: vmfne.vv v16, v8, v8 ; CHECK-NEXT: vmor.mm v0, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2521,6 +2731,7 @@ ; CHECK-NEXT: add a1, a0, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v0, v24, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fcmp oeq %va, zeroinitializer ret %vc diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll @@ -6,6 +6,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb ret %vc @@ -16,6 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -29,6 +31,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmseq.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -41,6 +44,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -53,6 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -65,6 +70,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -77,6 +83,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsne.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb ret %vc @@ -87,6 +94,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsne.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -100,6 +108,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -112,6 +121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -124,6 +134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb ret %vc @@ -134,6 +145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli 
a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -147,6 +159,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -159,6 +172,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -171,6 +185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp uge %va, %vb ret %vc @@ -182,6 +197,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -195,6 +211,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -208,6 +225,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -16 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -220,6 +238,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -232,6 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -244,6 +264,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -256,6 +277,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -268,6 +290,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -280,6 +303,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -292,6 +316,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v8, v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb ret %vc @@ -302,6 +327,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -315,6 +341,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -328,6 +355,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -340,6 +368,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -352,6 +381,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -364,6 +394,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -376,6 +407,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -388,6 +420,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -403,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, zero +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %splat = call @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i32 %vl) %vc = icmp ult %va, %splat @@ -414,6 +448,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb ret %vc @@ -424,6 +459,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -437,6 +473,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -449,6 +486,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -461,6 +499,7 @@ 
; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb ret %vc @@ -471,6 +510,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -484,6 +524,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -496,6 +537,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -508,6 +550,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb ret %vc @@ -519,6 +562,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -532,6 +576,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -545,6 +590,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -16 ; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -557,6 +603,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -569,6 +616,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -581,6 +629,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -593,6 +642,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -605,6 +655,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb ret %vc @@ -615,6 +666,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 
%splat = shufflevector %head, undef, zeroinitializer @@ -628,6 +680,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -641,6 +694,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -653,6 +707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -665,6 +720,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -677,6 +733,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -689,6 +746,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -701,6 +759,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb ret %vc @@ -711,6 +770,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -724,6 +784,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -736,6 +797,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -748,6 +810,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb ret %vc @@ -758,6 +821,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -771,6 +835,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmseq.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -783,6 +848,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, 
e16, m2, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -795,6 +861,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -807,6 +874,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -819,6 +887,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsne.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb ret %vc @@ -829,6 +898,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsne.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -842,6 +912,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -854,6 +925,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -866,6 +938,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb ret %vc @@ -876,6 +949,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -889,6 +963,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsltu.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -901,6 +976,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -913,6 +989,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp uge %va, %vb ret %vc @@ -924,6 +1001,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -937,6 +1015,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsleu.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector 
%head, undef, zeroinitializer @@ -950,6 +1029,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.i v10, -16 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -962,6 +1042,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -974,6 +1055,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -986,6 +1068,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -998,6 +1081,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1010,6 +1094,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1022,6 +1107,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1037,6 +1123,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %splat = call @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i32 %vl) %vc = icmp uge %va, %splat @@ -1048,6 +1135,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb ret %vc @@ -1058,6 +1146,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1071,6 +1160,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1084,6 +1174,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1096,6 +1187,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer 
@@ -1108,6 +1200,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1120,6 +1213,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1132,6 +1226,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1144,6 +1239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1156,6 +1252,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb ret %vc @@ -1166,6 +1263,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1179,6 +1277,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1191,6 +1290,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1203,6 +1303,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb ret %vc @@ -1213,6 +1314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1226,6 +1328,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmslt.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1238,6 +1341,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1250,6 +1354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb ret %vc @@ -1261,6 +1366,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1274,6 +1380,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsle.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1287,6 +1394,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.i v10, -16 ; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1299,6 +1407,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1311,6 +1420,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1323,6 +1433,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1335,6 +1446,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1347,6 +1459,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmslt.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb ret %vc @@ -1357,6 +1470,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1370,6 +1484,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1383,6 +1498,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1395,6 +1511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1407,6 +1524,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1419,6 +1537,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1431,6 +1550,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1443,6 +1563,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb ret %vc @@ -1453,6 +1574,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1466,6 +1588,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1478,6 +1601,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1490,6 +1614,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb ret %vc @@ -1500,6 +1625,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1513,6 +1639,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmseq.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1525,6 +1652,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1537,6 +1665,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1549,6 +1678,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1561,6 +1691,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsne.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb ret %vc @@ -1571,6 +1702,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsne.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1584,6 +1716,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, 
mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1596,6 +1729,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1608,6 +1742,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb ret %vc @@ -1618,6 +1753,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1631,6 +1767,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsltu.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1643,6 +1780,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1655,6 +1793,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp uge %va, %vb ret %vc @@ -1666,6 +1805,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1679,6 +1819,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsleu.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1692,6 +1833,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.i v12, -16 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1704,6 +1846,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1716,6 +1859,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1728,6 +1872,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1740,6 +1885,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1752,6 +1898,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1764,6 +1911,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1776,6 +1924,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb ret %vc @@ -1786,6 +1935,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1799,6 +1949,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1812,6 +1963,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1824,6 +1976,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1836,6 +1989,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1848,6 +2002,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1860,6 +2015,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1872,6 +2028,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1884,6 +2041,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb ret %vc @@ -1894,6 +2052,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = 
shufflevector %head, undef, zeroinitializer @@ -1907,6 +2066,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1919,6 +2079,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1931,6 +2092,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb ret %vc @@ -1941,6 +2103,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1954,6 +2117,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmslt.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1966,6 +2130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1978,6 +2143,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb ret %vc @@ -1989,6 +2155,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2002,6 +2169,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsle.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2015,6 +2183,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.i v12, -16 ; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2027,6 +2196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2039,6 +2209,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2051,6 +2222,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2063,6 +2235,7 
@@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2075,6 +2248,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb ret %vc @@ -2085,6 +2259,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2098,6 +2273,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2111,6 +2287,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2123,6 +2300,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2135,6 +2313,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2147,6 +2326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2159,6 +2339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2171,6 +2352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb ret %vc @@ -2181,6 +2363,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2194,6 +2377,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2206,6 +2390,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2218,6 +2403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: 
vmseq.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb ret %vc @@ -2235,6 +2421,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmseq.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2254,6 +2441,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmseq.vv v0, v16, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2266,6 +2454,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2278,6 +2467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2290,6 +2480,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2302,6 +2493,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsne.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb ret %vc @@ -2319,6 +2511,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsne.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2338,6 +2531,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsne.vv v0, v16, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2350,6 +2544,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2362,6 +2557,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb ret %vc @@ -2379,6 +2575,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v16, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2398,6 +2595,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2410,6 +2608,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ 
-2422,6 +2621,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp uge %va, %vb ret %vc @@ -2439,6 +2639,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v16, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2458,6 +2659,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2471,6 +2673,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.i v16, -16 ; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2483,6 +2686,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2495,6 +2699,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2507,6 +2712,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2519,6 +2725,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2531,6 +2738,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2543,6 +2751,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2555,6 +2764,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb ret %vc @@ -2572,6 +2782,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2591,6 +2802,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v16, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2604,6 +2816,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu 
; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2616,6 +2829,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2628,6 +2842,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2640,6 +2855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2652,6 +2868,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2664,6 +2881,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2676,6 +2894,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb ret %vc @@ -2693,6 +2912,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2712,6 +2932,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v16, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2724,6 +2945,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2736,6 +2958,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmslt.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb ret %vc @@ -2753,6 +2976,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v16, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2772,6 +2996,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2784,6 +3009,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, 
i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2796,6 +3022,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb ret %vc @@ -2813,6 +3040,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v16, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2832,6 +3060,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2845,6 +3074,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.i v16, -16 ; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2857,6 +3087,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2869,6 +3100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2881,6 +3113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2893,6 +3126,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2905,6 +3139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmslt.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb ret %vc @@ -2922,6 +3157,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2941,6 +3177,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v16, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2954,6 +3191,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2966,6 +3204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2978,6 
+3217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2990,6 +3230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -3002,6 +3243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -3014,6 +3256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb ret %vc @@ -3031,6 +3274,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -3050,6 +3294,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v16, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -3062,6 +3307,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -3078,6 +3324,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %heada = insertelement undef, i8 5, i32 0 %splata = shufflevector %heada, undef, zeroinitializer @@ -3100,6 +3347,7 @@ ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v0, v24, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp eq %va, zeroinitializer ret %vc diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll @@ -6,6 +6,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb ret %vc @@ -16,6 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -29,6 +31,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmseq.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -41,6 +44,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector
%head, undef, zeroinitializer @@ -53,6 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -65,6 +70,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -77,6 +83,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsne.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb ret %vc @@ -87,6 +94,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsne.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -100,6 +108,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -112,6 +121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -124,6 +134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb ret %vc @@ -134,6 +145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -147,6 +159,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsltu.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -159,6 +172,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -171,6 +185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp uge %va, %vb ret %vc @@ -182,6 +197,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -195,6 +211,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsleu.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -208,6 +225,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -16 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %head = insertelement undef, i8 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -220,6 +238,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -232,6 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -244,6 +264,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -256,6 +277,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -268,6 +290,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -280,6 +303,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -295,6 +319,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %splat = call @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i64 %vl) %vc = icmp uge %va, %splat @@ -306,6 +331,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb ret %vc @@ -316,6 +342,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -329,6 +356,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -342,6 +370,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -354,6 +383,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -366,6 +396,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, 
zeroinitializer @@ -378,6 +409,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -390,6 +422,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -402,6 +435,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -417,6 +451,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, zero +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %splat = call @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i64 %vl) %vc = icmp ult %va, %splat @@ -428,6 +463,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb ret %vc @@ -438,6 +474,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -451,6 +488,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -463,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -475,6 +514,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb ret %vc @@ -485,6 +525,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -498,6 +539,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmslt.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -510,6 +552,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -522,6 +565,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb ret %vc @@ -533,6 +577,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 
%splat = shufflevector %head, undef, zeroinitializer @@ -546,6 +591,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsle.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -559,6 +605,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -16 ; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -571,6 +618,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -583,6 +631,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -595,6 +644,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -607,6 +657,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -619,6 +670,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb ret %vc @@ -629,6 +681,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -642,6 +695,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -655,6 +709,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -667,6 +722,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -679,6 +735,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -691,6 +748,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -703,6 
+761,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -715,6 +774,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vv v0, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb ret %vc @@ -725,6 +785,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -738,6 +799,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -750,6 +812,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -762,6 +825,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb ret %vc @@ -772,6 +836,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -785,6 +850,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmseq.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -797,6 +863,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -809,6 +876,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -821,6 +889,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -833,6 +902,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsne.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb ret %vc @@ -843,6 +913,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsne.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -856,6 +927,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, 
i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -868,6 +940,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -880,6 +953,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb ret %vc @@ -890,6 +964,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -903,6 +978,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsltu.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -915,6 +991,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -927,6 +1004,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp uge %va, %vb ret %vc @@ -938,6 +1016,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -951,6 +1030,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsleu.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -964,6 +1044,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.i v10, -16 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -976,6 +1057,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -988,6 +1070,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1000,6 +1083,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1012,6 +1096,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1024,6 +1109,7 @@ ; CHECK: # %bb.0: 
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1036,6 +1122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1048,6 +1135,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb ret %vc @@ -1058,6 +1146,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1071,6 +1160,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1084,6 +1174,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1096,6 +1187,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1108,6 +1200,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1120,6 +1213,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1132,6 +1226,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1144,6 +1239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1156,6 +1252,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb ret %vc @@ -1166,6 +1263,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1179,6 +1277,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1191,6 +1290,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1203,6 +1303,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb ret %vc @@ -1213,6 +1314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1226,6 +1328,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmslt.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1238,6 +1341,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1250,6 +1354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb ret %vc @@ -1261,6 +1366,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1274,6 +1380,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsle.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1287,6 +1394,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.i v10, -16 ; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1299,6 +1407,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1311,6 +1420,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1323,6 +1433,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1335,6 +1446,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement 
undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1347,6 +1459,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmslt.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb ret %vc @@ -1357,6 +1470,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1370,6 +1484,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1383,6 +1498,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1395,6 +1511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1407,6 +1524,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1419,6 +1537,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1431,6 +1550,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1443,6 +1563,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vv v0, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb ret %vc @@ -1453,6 +1574,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1466,6 +1588,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1478,6 +1601,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1490,6 +1614,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb ret %vc @@ -1500,6 +1625,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, 
m4, ta, mu ; CHECK-NEXT: vmseq.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1513,6 +1639,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmseq.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1525,6 +1652,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1537,6 +1665,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1549,6 +1678,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1561,6 +1691,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsne.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb ret %vc @@ -1571,6 +1702,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsne.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1584,6 +1716,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1596,6 +1729,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1608,6 +1742,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb ret %vc @@ -1618,6 +1753,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1631,6 +1767,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsltu.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1643,6 +1780,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1655,6 +1793,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %vc = icmp uge %va, %vb ret %vc @@ -1666,6 +1805,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1679,6 +1819,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsleu.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1692,6 +1833,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.i v12, -16 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1704,6 +1846,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1716,6 +1859,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1728,6 +1872,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1740,6 +1885,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1752,6 +1898,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1764,6 +1911,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1776,6 +1924,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb ret %vc @@ -1786,6 +1935,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1799,6 +1949,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1812,6 +1963,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -16, i32 0 %splat = 
shufflevector %head, undef, zeroinitializer @@ -1824,6 +1976,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1836,6 +1989,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1848,6 +2002,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1860,6 +2015,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1872,6 +2028,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1884,6 +2041,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb ret %vc @@ -1894,6 +2052,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1907,6 +2066,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1919,6 +2079,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1931,6 +2092,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb ret %vc @@ -1941,6 +2103,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1954,6 +2117,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmslt.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1966,6 +2130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1978,6 +2143,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, 
zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb ret %vc @@ -1989,6 +2155,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2002,6 +2169,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsle.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2015,6 +2183,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.i v12, -16 ; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2027,6 +2196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2039,6 +2209,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2051,6 +2222,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2063,6 +2235,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2075,6 +2248,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb ret %vc @@ -2085,6 +2259,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2098,6 +2273,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2111,6 +2287,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2123,6 +2300,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2135,6 +2313,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -15 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2147,6 +2326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2159,6 +2339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2171,6 +2352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vv v0, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb ret %vc @@ -2181,6 +2363,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2194,6 +2377,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2206,6 +2390,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2218,6 +2403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb ret %vc @@ -2228,6 +2414,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2241,6 +2428,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmseq.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2253,6 +2441,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2265,6 +2454,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2277,6 +2467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2289,6 +2480,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsne.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb ret %vc @@ -2299,6 +2491,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsne.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2312,6 +2505,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2324,6 +2518,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2336,6 +2531,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb ret %vc @@ -2346,6 +2542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2359,6 +2556,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsltu.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2371,6 +2569,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2383,6 +2582,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp uge %va, %vb ret %vc @@ -2394,6 +2594,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2407,6 +2608,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2420,6 +2622,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.i v16, -16 ; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2432,6 +2635,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2444,6 +2648,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2456,6 +2661,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, 
ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2468,6 +2674,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2480,6 +2687,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2492,6 +2700,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2504,6 +2713,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsltu.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb ret %vc @@ -2514,6 +2724,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2527,6 +2738,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsltu.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2540,6 +2752,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2552,6 +2765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2564,6 +2778,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2576,6 +2791,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2588,6 +2804,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmseq.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2600,6 +2817,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2612,6 +2830,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb ret %vc @@ -2622,6 +2841,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2635,6 +2855,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2647,6 +2868,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsleu.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2659,6 +2881,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmslt.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb ret %vc @@ -2669,6 +2892,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2682,6 +2906,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmslt.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2694,6 +2919,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2706,6 +2932,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb ret %vc @@ -2717,6 +2944,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2730,6 +2958,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2743,6 +2972,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.i v16, -16 ; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2755,6 +2985,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2767,6 +2998,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, 
undef, zeroinitializer @@ -2779,6 +3011,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2791,6 +3024,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2803,6 +3037,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmslt.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb ret %vc @@ -2813,6 +3048,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2826,6 +3062,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmslt.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2839,6 +3076,7 @@ ; CHECK-NEXT: addi a0, zero, -16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2851,6 +3089,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2863,6 +3102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsgt.vi v0, v8, -15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2875,6 +3115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2887,6 +3128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2899,6 +3141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb ret %vc @@ -2909,6 +3152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2922,6 +3166,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2934,6 +3179,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli 
a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmsle.vi v0, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 5, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2954,6 +3200,7 @@ ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vslideup.vx v0, v24, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = icmp eq %va, zeroinitializer ret %vc diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB0_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 @@ -52,6 +53,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB1_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 @@ -88,6 +90,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB2_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 @@ -124,6 +127,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB3_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 @@ -160,6 +164,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB4_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 @@ -196,6 +201,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB5_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 @@ -232,6 +238,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB6_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 @@ -295,6 +302,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a4, .LBB7_6 ; CHECK-NEXT: .LBB7_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -388,6 +396,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a4, .LBB8_6 ; CHECK-NEXT: .LBB8_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -481,6 +490,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a4, .LBB9_6 ; CHECK-NEXT: .LBB9_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -574,6 +584,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a4, .LBB10_6 ; CHECK-NEXT: .LBB10_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -667,6 +678,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a4, .LBB11_6 ; CHECK-NEXT: .LBB11_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: 
%0 = call i64 @llvm.vscale.i64() @@ -760,6 +772,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a4, .LBB12_6 ; CHECK-NEXT: .LBB12_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -853,6 +866,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a4, .LBB13_6 ; CHECK-NEXT: .LBB13_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -919,6 +933,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB14_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 @@ -955,6 +970,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB15_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 @@ -991,6 +1007,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB16_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 @@ -1054,6 +1071,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a4, .LBB17_6 ; CHECK-NEXT: .LBB17_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -1147,6 +1165,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a4, .LBB18_6 ; CHECK-NEXT: .LBB18_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -1240,6 +1259,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a1, a3, .LBB19_6 ; CHECK-NEXT: .LBB19_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -1307,6 +1327,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a1, .LBB20_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 @@ -1344,6 +1365,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a1, .LBB21_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 @@ -1381,6 +1403,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a1, .LBB22_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 @@ -1418,6 +1441,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a1, .LBB23_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 @@ -1455,6 +1479,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a1, .LBB24_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 @@ -1492,6 +1517,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a1, .LBB25_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 @@ -1555,6 +1581,7 @@ ; CHECK-NEXT: 
addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a1, .LBB26_6 ; CHECK-NEXT: .LBB26_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -1648,6 +1675,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a1, .LBB27_6 ; CHECK-NEXT: .LBB27_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -1741,6 +1769,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a1, .LBB28_6 ; CHECK-NEXT: .LBB28_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -1834,6 +1863,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a1, .LBB29_6 ; CHECK-NEXT: .LBB29_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -1927,6 +1957,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a1, .LBB30_6 ; CHECK-NEXT: .LBB30_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -2020,6 +2051,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a2, a1, .LBB31_6 ; CHECK-NEXT: .LBB31_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -2089,6 +2121,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB32_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 @@ -2131,6 +2164,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: bnez a2, .LBB33_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 @@ -2203,6 +2237,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a3, a2, .LBB34_6 ; CHECK-NEXT: .LBB34_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() @@ -2307,6 +2342,7 @@ ; CHECK-NEXT: addi a0, a0, 4 ; CHECK-NEXT: bgeu a3, a2, .LBB35_6 ; CHECK-NEXT: .LBB35_7: # %for.cond.cleanup +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call i64 @llvm.vscale.i64() diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv1i8() ret %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv2i8() ret %v @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv4i8() ret %v @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv8i8() ret %v @@ -56,6 +60,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = 
call @llvm.experimental.stepvector.nxv8i8() @@ -71,6 +76,7 @@ ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: addi a0, zero, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = insertelement poison, i8 3, i32 0 @@ -86,6 +92,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = insertelement poison, i8 2, i32 0 @@ -102,6 +109,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv16i8() ret %v @@ -114,6 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv32i8() ret %v @@ -126,6 +135,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv64i8() ret %v @@ -138,6 +148,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv1i16() ret %v @@ -150,6 +161,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv2i16() ret %v @@ -162,6 +174,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv4i16() ret %v @@ -174,6 +187,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv8i16() ret %v @@ -186,6 +200,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv16i16() ret %v @@ -197,6 +212,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call @llvm.experimental.stepvector.nxv16i16() @@ -212,6 +228,7 @@ ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: addi a0, zero, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = insertelement poison, i16 3, i32 0 @@ -227,6 +244,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = insertelement poison, i16 2, i32 0 @@ -243,6 +261,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv32i16() ret %v @@ -255,6 +274,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv1i32() ret %v @@ -267,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call 
@llvm.experimental.stepvector.nxv2i32() ret %v @@ -279,6 +300,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv4i32() ret %v @@ -291,6 +313,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv8i32() ret %v @@ -303,6 +326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv16i32() ret %v @@ -314,6 +338,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call @llvm.experimental.stepvector.nxv16i32() @@ -329,6 +354,7 @@ ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: addi a0, zero, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = insertelement poison, i32 3, i32 0 @@ -344,6 +370,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = insertelement poison, i32 2, i32 0 @@ -360,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv1i64() ret %v @@ -372,6 +400,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv2i64() ret %v @@ -384,6 +413,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv4i64() ret %v @@ -396,6 +426,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vid.v v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv8i64() ret %v @@ -407,6 +438,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = call @llvm.experimental.stepvector.nxv8i64() @@ -422,6 +454,7 @@ ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: addi a0, zero, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = insertelement poison, i64 3, i32 0 @@ -447,6 +480,7 @@ ; RV32-NEXT: vid.v v16 ; RV32-NEXT: vmul.vv v8, v16, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mul_bigimm_stepvector_nxv8i64: @@ -458,6 +492,7 @@ ; RV64-NEXT: slli a0, a0, 12 ; RV64-NEXT: addi a0, a0, -683 ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret entry: %0 = insertelement poison, i64 33333333333, i32 0 @@ -474,6 +509,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = insertelement poison, i64 2, i32 0 @@ -499,6 +535,7 @@ ; RV32-NEXT: vid.v v8 ; RV32-NEXT: vadd.vv v16, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: 
stepvector_nxv16i64: @@ -507,6 +544,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vadd.vx v16, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.experimental.stepvector.nxv16i64() ret %v @@ -528,6 +566,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 1 ; RV32-NEXT: vadd.vv v16, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: add_stepvector_nxv16i64: @@ -538,6 +577,7 @@ ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vsll.vi v8, v8, 1 ; RV64-NEXT: vadd.vx v16, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret entry: %0 = call @llvm.experimental.stepvector.nxv16i64() @@ -564,6 +604,7 @@ ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vadd.vv v16, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mul_stepvector_nxv16i64: @@ -576,6 +617,7 @@ ; RV64-NEXT: slli a1, a0, 1 ; RV64-NEXT: add a0, a1, a0 ; RV64-NEXT: vadd.vx v16, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret entry: %0 = insertelement poison, i64 3, i32 0 @@ -617,6 +659,7 @@ ; RV32-NEXT: vmul.vv v8, v24, v8 ; RV32-NEXT: vadd.vv v16, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: mul_bigimm_stepvector_nxv16i64: @@ -631,6 +674,7 @@ ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vmul.vx v8, v8, a1 ; RV64-NEXT: vadd.vx v16, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret entry: %0 = insertelement poison, i64 33333333333, i32 0 @@ -656,6 +700,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vadd.vv v16, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: shl_stepvector_nxv16i64: @@ -666,6 +711,7 @@ ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vsll.vi v8, v8, 2 ; RV64-NEXT: vadd.vx v16, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret entry: %0 = insertelement poison, i64 2, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll b/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll --- a/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %ptr, align 1 ret %v @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %ptr, align 2 ret %v @@ -29,6 +31,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %ptr, align 4 ret %v @@ -38,6 +41,7 @@ ; CHECK-LABEL: unaligned_load_nxv1i64_a1: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %ptr, align 1 ret %v @@ -47,6 +51,7 @@ ; CHECK-LABEL: unaligned_load_nxv1i64_a4: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %ptr, align 4 ret %v @@ -56,6 +61,7 @@ ; CHECK-LABEL: aligned_load_nxv1i64_a8: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = load , * %ptr, align 8 ret %v @@ -65,6 +71,7 @@ ; CHECK-LABEL: unaligned_load_nxv2i64_a1: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2r.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret
%v = load , * %ptr, align 1
ret %v
@@ -74,6 +81,7 @@
; CHECK-LABEL: unaligned_load_nxv2i64_a4:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2r.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = load <vscale x 2 x i64>, <vscale x 2 x i64>* %ptr, align 4
ret <vscale x 2 x i64> %v
@@ -83,6 +91,7 @@
; CHECK-LABEL: aligned_load_nxv2i64_a8:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2re64.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = load <vscale x 2 x i64>, <vscale x 2 x i64>* %ptr, align 8
ret <vscale x 2 x i64> %v
@@ -94,6 +103,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = load , * %ptr, align 1
ret %v
@@ -103,6 +113,7 @@
; CHECK-LABEL: unaligned_load_nxv4f32_a1:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2r.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = load <vscale x 4 x float>, <vscale x 4 x float>* %ptr, align 1
ret <vscale x 4 x float> %v
@@ -112,6 +123,7 @@
; CHECK-LABEL: unaligned_load_nxv4f32_a2:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2r.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = load <vscale x 4 x float>, <vscale x 4 x float>* %ptr, align 2
ret <vscale x 4 x float> %v
@@ -121,6 +133,7 @@
; CHECK-LABEL: aligned_load_nxv4f32_a4:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2re32.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = load <vscale x 4 x float>, <vscale x 4 x float>* %ptr, align 4
ret <vscale x 4 x float> %v
@@ -130,6 +143,7 @@
; CHECK-LABEL: unaligned_load_nxv8f16_a1:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2r.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = load <vscale x 8 x half>, <vscale x 8 x half>* %ptr, align 1
ret <vscale x 8 x half> %v
@@ -139,6 +153,7 @@
; CHECK-LABEL: aligned_load_nxv8f16_a2:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2re16.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = load <vscale x 8 x half>, <vscale x 8 x half>* %ptr, align 2
ret <vscale x 8 x half> %v
@@ -148,6 +163,7 @@
; CHECK-LABEL: unaligned_store_nxv4i32_a1:
; CHECK: # %bb.0:
; CHECK-NEXT: vs2r.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
store <vscale x 4 x i32> %x, <vscale x 4 x i32>* %ptr, align 1
ret void
@@ -157,6 +173,7 @@
; CHECK-LABEL: unaligned_store_nxv4i32_a2:
; CHECK: # %bb.0:
; CHECK-NEXT: vs2r.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
store <vscale x 4 x i32> %x, <vscale x 4 x i32>* %ptr, align 2
ret void
@@ -166,6 +183,7 @@
; CHECK-LABEL: aligned_store_nxv4i32_a4:
; CHECK: # %bb.0:
; CHECK-NEXT: vs2r.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
store <vscale x 4 x i32> %x, <vscale x 4 x i32>* %ptr, align 4
ret void
@@ -176,6 +194,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
store %x, * %ptr, align 1
ret void
@@ -186,6 +205,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
store %x, * %ptr, align 2
ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll b/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll
--- a/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll
@@ -11,6 +11,7 @@
define <4 x i32> @vload_v4i32_zero_evl(<4 x i32>* %ptr, <4 x i1> %m) {
; CHECK-LABEL: vload_v4i32_zero_evl:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.load.v4i32.p0v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 0)
ret <4 x i32> %v
@@ -19,6 +20,7 @@
define <4 x i32> @vload_v4i32_false_mask(<4 x i32>* %ptr, i32 %evl) {
; CHECK-LABEL: vload_v4i32_false_mask:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.load.v4i32.p0v4i32(<4 x i32>* %ptr, <4 x i1> zeroinitializer, i32 %evl)
ret <4 x i32> %v
@@ -29,6 +31,7 @@
define <4 x i32> @vgather_v4i32_v4i32_zero_evl(<4 x i32*> %ptrs, <4 x i1> %m) {
; CHECK-LABEL: vgather_v4i32_v4i32_zero_evl:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, <4 x i1> %m, i32 0)
ret <4 x i32> %v
@@ -37,6 +40,7 @@
define <4 x i32> @vgather_v4i32_v4i32_false_mask(<4 x i32*> %ptrs, i32 %evl) {
; CHECK-LABEL: vgather_v4i32_v4i32_false_mask:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, <4 x i1> zeroinitializer, i32 %evl)
ret <4 x i32> %v
@@ -47,6 +51,7 @@
define void @vstore_v4i32_zero_evl(<4 x i32> %val, <4 x i32>* %ptr, <4 x i1> %m) {
; CHECK-LABEL: vstore_v4i32_zero_evl:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
call void @llvm.vp.store.v4i32.p0v4i32(<4 x i32> %val, <4 x i32>* %ptr, <4 x i1> %m, i32 0)
ret void
@@ -55,6 +60,7 @@
define void @vstore_v4i32_false_mask(<4 x i32> %val, <4 x i32>* %ptr, i32 %evl) {
; CHECK-LABEL: vstore_v4i32_false_mask:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
call void @llvm.vp.store.v4i32.p0v4i32(<4 x i32> %val, <4 x i32>* %ptr, <4 x i1> zeroinitializer, i32 %evl)
ret void
@@ -65,6 +71,7 @@
define void @vscatter_v4i32_zero_evl(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m) {
; CHECK-LABEL: vscatter_v4i32_zero_evl:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
call void @llvm.vp.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m, i32 0)
ret void
@@ -73,6 +80,7 @@
define void @vscatter_v4i32_false_mask(<4 x i32> %val, <4 x i32*> %ptrs, i32 %evl) {
; CHECK-LABEL: vscatter_v4i32_false_mask:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
call void @llvm.vp.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> zeroinitializer, i32 %evl)
ret void
@@ -83,6 +91,7 @@
define <4 x i32> @vadd_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vadd_v4i32_zero_evl:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%s = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
ret <4 x i32> %s
@@ -91,6 +100,7 @@
define <4 x i32> @vadd_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vadd_v4i32_false_mask:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%s = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
ret <4 x i32> %s
@@ -101,6 +111,7 @@
define <4 x i32> @vand_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vand_v4i32_zero_evl:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%s = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
ret <4 x i32> %s
@@ -109,6 +120,7 @@
define <4 x i32> @vand_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vand_v4i32_false_mask:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%s = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
ret <4 x i32> %s
@@ -119,6 +131,7 @@
define <4 x i32> @vlshr_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vlshr_v4i32_zero_evl:
; CHECK: # %bb.0:
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
%s = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
ret <4 x i32> %s
@@ -127,6 +140,7 @@
define <4 x i32>
@vlshr_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) { ; CHECK-LABEL: vlshr_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x i32> %s @@ -137,6 +151,7 @@ define <4 x i32> @vmul_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vmul_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0) ret <4 x i32> %s @@ -145,6 +160,7 @@ define <4 x i32> @vmul_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) { ; CHECK-LABEL: vmul_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x i32> %s @@ -155,6 +171,7 @@ define <4 x i32> @vor_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vor_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0) ret <4 x i32> %s @@ -163,6 +180,7 @@ define <4 x i32> @vor_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) { ; CHECK-LABEL: vor_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x i32> %s @@ -173,6 +191,7 @@ define <4 x i32> @vsdiv_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vsdiv_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0) ret <4 x i32> %s @@ -181,6 +200,7 @@ define <4 x i32> @vsdiv_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) { ; CHECK-LABEL: vsdiv_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x i32> %s @@ -191,6 +211,7 @@ define <4 x i32> @vsrem_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vsrem_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0) ret <4 x i32> %s @@ -199,6 +220,7 @@ define <4 x i32> @vsrem_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) { ; CHECK-LABEL: vsrem_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x i32> %s @@ -209,6 +231,7 @@ define <4 x i32> @vsub_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vsub_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0) ret <4 x i32> %s @@ -217,6 +240,7 @@ define <4 x i32> @vsub_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) { ; CHECK-LABEL: vsub_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl) 
ret <4 x i32> %s @@ -227,6 +251,7 @@ define <4 x i32> @vudiv_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vudiv_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0) ret <4 x i32> %s @@ -235,6 +260,7 @@ define <4 x i32> @vudiv_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) { ; CHECK-LABEL: vudiv_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x i32> %s @@ -245,6 +271,7 @@ define <4 x i32> @vurem_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vurem_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0) ret <4 x i32> %s @@ -253,6 +280,7 @@ define <4 x i32> @vurem_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) { ; CHECK-LABEL: vurem_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x i32> %s @@ -263,6 +291,7 @@ define <4 x i32> @vxor_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) { ; CHECK-LABEL: vxor_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0) ret <4 x i32> %s @@ -271,6 +300,7 @@ define <4 x i32> @vxor_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) { ; CHECK-LABEL: vxor_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x i32> %s @@ -281,6 +311,7 @@ define <4 x float> @vfadd_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfadd_v4f32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 0) ret <4 x float> %s @@ -289,6 +320,7 @@ define <4 x float> @vfadd_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 %evl) { ; CHECK-LABEL: vfadd_v4f32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x float> %s @@ -299,6 +331,7 @@ define <4 x float> @vfsub_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfsub_v4f32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 0) ret <4 x float> %s @@ -307,6 +340,7 @@ define <4 x float> @vfsub_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 %evl) { ; CHECK-LABEL: vfsub_v4f32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x float> %s @@ -317,6 +351,7 @@ define <4 x float> @vfmul_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfmul_v4f32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 0) ret <4 x float> %s @@ -325,6 +360,7 @@ define <4 x float> @vfmul_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 %evl) { ; CHECK-LABEL: vfmul_v4f32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x float> %s @@ -335,6 +371,7 @@ define <4 x float> @vfdiv_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfdiv_v4f32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 0) ret <4 x float> %s @@ -343,6 +380,7 @@ define <4 x float> @vfdiv_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 %evl) { ; CHECK-LABEL: vfdiv_v4f32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x float> %s @@ -353,6 +391,7 @@ define <4 x float> @vfrem_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) { ; CHECK-LABEL: vfrem_v4f32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x float> @llvm.vp.frem.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 0) ret <4 x float> %s @@ -361,6 +400,7 @@ define <4 x float> @vfrem_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 %evl) { ; CHECK-LABEL: vfrem_v4f32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call <4 x float> @llvm.vp.frem.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> zeroinitializer, i32 %evl) ret <4 x float> %s @@ -371,6 +411,7 @@ define i32 @vreduce_add_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_add_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.add.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0) ret i32 %s @@ -379,6 +420,7 @@ define i32 @vreduce_add_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ; CHECK-LABEL: vreduce_add_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.add.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl) ret i32 %s @@ -389,6 +431,7 @@ define i32 @vreduce_mul_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_mul_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.mul.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0) ret i32 %s @@ -397,6 +440,7 @@ define i32 @vreduce_mul_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ; CHECK-LABEL: vreduce_mul_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.mul.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl) ret i32 %s @@ -407,6 +451,7 @@ define i32 @vreduce_and_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_and_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.and.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0) ret i32 %s @@ -415,6 +460,7 @@ define i32 
@vreduce_and_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ; CHECK-LABEL: vreduce_and_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.and.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl) ret i32 %s @@ -425,6 +471,7 @@ define i32 @vreduce_or_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_or_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.or.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0) ret i32 %s @@ -433,6 +480,7 @@ define i32 @vreduce_or_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ; CHECK-LABEL: vreduce_or_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.or.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl) ret i32 %s @@ -443,6 +491,7 @@ define i32 @vreduce_xor_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_xor_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.xor.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0) ret i32 %s @@ -451,6 +500,7 @@ define i32 @vreduce_xor_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ; CHECK-LABEL: vreduce_xor_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.xor.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl) ret i32 %s @@ -461,6 +511,7 @@ define i32 @vreduce_smax_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_smax_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.smax.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0) ret i32 %s @@ -469,6 +520,7 @@ define i32 @vreduce_smax_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ; CHECK-LABEL: vreduce_smax_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.smax.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl) ret i32 %s @@ -479,6 +531,7 @@ define i32 @vreduce_smin_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_smin_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.smin.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0) ret i32 %s @@ -487,6 +540,7 @@ define i32 @vreduce_smin_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ; CHECK-LABEL: vreduce_smin_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.smin.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl) ret i32 %s @@ -497,6 +551,7 @@ define i32 @vreduce_umax_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_umax_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.umax.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0) ret i32 %s @@ -505,6 +560,7 @@ define i32 @vreduce_umax_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ; CHECK-LABEL: vreduce_umax_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.umax.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 
%evl) ret i32 %s @@ -515,6 +571,7 @@ define i32 @vreduce_umin_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_umin_v4i32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.umin.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0) ret i32 %s @@ -523,6 +580,7 @@ define i32 @vreduce_umin_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) { ; CHECK-LABEL: vreduce_umin_v4i32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call i32 @llvm.vp.reduce.umin.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl) ret i32 %s @@ -533,6 +591,7 @@ define float @vreduce_seq_fadd_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_seq_fadd_v4f32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call float @llvm.vp.reduce.fadd.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0) ret float %s @@ -541,6 +600,7 @@ define float @vreduce_seq_fadd_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) { ; CHECK-LABEL: vreduce_seq_fadd_v4f32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call float @llvm.vp.reduce.fadd.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl) ret float %s @@ -549,6 +609,7 @@ define float @vreduce_fadd_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_fadd_v4f32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call reassoc float @llvm.vp.reduce.fadd.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0) ret float %s @@ -557,6 +618,7 @@ define float @vreduce_fadd_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) { ; CHECK-LABEL: vreduce_fadd_v4f32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call reassoc float @llvm.vp.reduce.fadd.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl) ret float %s @@ -567,6 +629,7 @@ define float @vreduce_seq_fmul_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_seq_fmul_v4f32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call float @llvm.vp.reduce.fmul.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0) ret float %s @@ -575,6 +638,7 @@ define float @vreduce_seq_fmul_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) { ; CHECK-LABEL: vreduce_seq_fmul_v4f32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call float @llvm.vp.reduce.fmul.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl) ret float %s @@ -583,6 +647,7 @@ define float @vreduce_fmul_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_fmul_v4f32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call reassoc float @llvm.vp.reduce.fmul.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0) ret float %s @@ -591,6 +656,7 @@ define float @vreduce_fmul_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) { ; CHECK-LABEL: vreduce_fmul_v4f32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call reassoc float @llvm.vp.reduce.fmul.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl) ret float %s @@ -601,6 +667,7 @@ define float @vreduce_fmin_v4f32_zero_evl(float %start, 
<4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_fmin_v4f32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call float @llvm.vp.reduce.fmin.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0) ret float %s @@ -609,6 +676,7 @@ define float @vreduce_fmin_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) { ; CHECK-LABEL: vreduce_fmin_v4f32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call float @llvm.vp.reduce.fmin.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl) ret float %s @@ -619,6 +687,7 @@ define float @vreduce_fmax_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) { ; CHECK-LABEL: vreduce_fmax_v4f32_zero_evl: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call float @llvm.vp.reduce.fmax.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0) ret float %s @@ -627,6 +696,7 @@ define float @vreduce_fmax_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) { ; CHECK-LABEL: vreduce_fmax_v4f32_false_mask: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %s = call float @llvm.vp.reduce.fmax.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl) ret float %s diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -31,6 +33,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -44,6 +47,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %heada = insertelement undef, i8 2, i32 0 %splata = shufflevector %heada, undef, zeroinitializer @@ -58,6 +62,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -70,6 +75,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -82,6 +88,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -94,6 +101,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, 
i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -106,6 +114,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -118,6 +127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -130,6 +140,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -142,6 +153,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -154,6 +166,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -166,6 +179,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -178,6 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -190,6 +205,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -202,6 +218,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -214,6 +231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -226,6 +244,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -238,6 +257,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -250,6 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -262,6 
+283,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -274,6 +296,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -286,6 +309,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -298,6 +322,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -310,6 +335,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -322,6 +348,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -334,6 +361,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -346,6 +374,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -358,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -370,6 +400,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -382,6 +413,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -394,6 +426,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -406,6 +439,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -418,6 +452,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli 
a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -430,6 +465,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -442,6 +478,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -454,6 +491,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -466,6 +504,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -478,6 +517,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -490,6 +530,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -502,6 +543,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -514,6 +556,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -526,6 +569,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -538,6 +582,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -550,6 +595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -562,6 +608,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -574,6 +621,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vadd.vi 
v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -586,6 +634,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -598,6 +647,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -610,6 +660,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -622,6 +673,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -634,6 +686,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -646,6 +699,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -658,6 +712,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -677,12 +732,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -695,6 +752,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -707,6 +765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -726,12 +785,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -744,6 +805,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, 
zero, e64, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -756,6 +818,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -775,12 +838,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -793,6 +858,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -805,6 +871,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -824,12 +891,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -842,6 +911,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -854,6 +924,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -25,6 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -73,6 +78,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -101,6 +108,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -111,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -123,6 +132,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -135,6 +145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -149,6 +160,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -161,6 +173,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -177,6 +190,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv3i8( %va, %b, %m, i32 %evl) ret %v @@ -187,6 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -199,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -211,6 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
%elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -225,6 +242,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -237,6 +255,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -253,6 +272,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -263,6 +283,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -275,6 +296,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -287,6 +309,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -301,6 +324,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -313,6 +337,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -329,6 +354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -339,6 +365,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -351,6 +378,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -363,6 +391,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -377,6 +406,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 
%vb = shufflevector %elt.head, undef, zeroinitializer @@ -389,6 +419,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -405,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -415,6 +447,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -427,6 +460,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -439,6 +473,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -453,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -465,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -481,6 +518,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -491,6 +529,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -503,6 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -515,6 +555,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -529,6 +570,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -541,6 +583,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer 
@@ -557,6 +600,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -567,6 +611,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -579,6 +624,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -591,6 +637,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -605,6 +652,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -617,6 +665,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -646,6 +695,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: sub a0, a1, a2 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: bltu a1, a0, .LBB49_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a4, a0 @@ -680,6 +730,7 @@ ; CHECK-NEXT: .LBB50_4: ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vadd.vi v16, v16, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -696,6 +747,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -706,6 +758,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -718,6 +771,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -730,6 +784,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -744,6 +799,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -756,6 +812,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -772,6 +829,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -782,6 +840,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -794,6 +853,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -806,6 +866,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -820,6 +881,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -832,6 +894,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -848,6 +911,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -858,6 +922,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -870,6 +935,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -882,6 +948,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -896,6 +963,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -908,6 +976,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -924,6 +993,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, 
mu ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -934,6 +1004,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -946,6 +1017,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -958,6 +1030,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -972,6 +1045,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -984,6 +1058,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1000,6 +1075,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -1010,6 +1086,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1022,6 +1099,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1034,6 +1112,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1048,6 +1127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1060,6 +1140,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1076,6 +1157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -1086,6 +1168,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v16 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1098,6 +1181,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1110,6 +1194,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1124,6 +1209,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1136,6 +1222,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1152,6 +1239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -1162,6 +1250,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1174,6 +1263,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1186,6 +1276,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1200,6 +1291,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1212,6 +1304,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1228,6 +1321,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -1238,6 +1332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1250,6 +1345,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1262,6 +1358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1276,6 +1373,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1288,6 +1386,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1304,6 +1403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -1314,6 +1414,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1326,6 +1427,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1338,6 +1440,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1352,6 +1455,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1364,6 +1468,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1380,6 +1485,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -1390,6 +1496,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1402,6 +1509,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1414,6 +1522,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1428,6 +1537,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1440,6 +1550,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1456,6 +1567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -1466,6 +1578,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1478,6 +1591,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1490,6 +1604,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1504,6 +1619,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1516,6 +1632,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1553,6 +1670,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1587,6 +1705,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1627,6 +1746,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1651,6 +1771,7 @@ ; RV32-NEXT: slli a0, a0, 1 ; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; RV32-NEXT: vadd.vi v8, v8, -1, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vi_nxv32i32_evl_nx16: @@ -1665,6 +1786,7 @@ ; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; RV64-NEXT: vmv1r.v 
v0, v24 ; RV64-NEXT: vadd.vi v16, v16, -1, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1681,6 +1803,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -1691,6 +1814,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1711,12 +1835,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1737,12 +1863,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1757,6 +1885,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1769,6 +1898,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1785,6 +1915,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1795,6 +1926,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1815,12 +1947,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1841,12 +1975,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1861,6 +1997,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1873,6 +2010,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1889,6 +2027,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1899,6 +2038,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1919,12 +2059,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1945,12 +2087,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1965,6 +2109,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1977,6 +2122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1993,6 +2139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.add.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -2003,6 +2150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -2023,12 +2171,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; 
RV64-NEXT: vadd.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2049,12 +2199,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2069,6 +2221,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2081,6 +2234,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -29,6 +31,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -41,6 +44,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -54,6 +58,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -66,6 +71,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -76,6 +82,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -88,6 +95,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -100,6 +108,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, 
v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -113,6 +122,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -125,6 +135,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -135,6 +146,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -147,6 +159,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -159,6 +172,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -172,6 +186,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -184,6 +199,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -194,6 +210,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -206,6 +223,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -218,6 +236,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -231,6 +250,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -243,6 +263,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -253,6 +274,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -265,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, 
e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -277,6 +300,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -290,6 +314,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -302,6 +327,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -312,6 +338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -324,6 +351,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -336,6 +364,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -349,6 +378,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -361,6 +391,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -371,6 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -383,6 +415,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -395,6 +428,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -408,6 +442,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -420,6 +455,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -430,6 +466,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -442,6 +479,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -454,6 +492,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -467,6 +506,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -479,6 +519,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -489,6 +530,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -501,6 +543,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -513,6 +556,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -526,6 +570,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -538,6 +583,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -548,6 +594,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -560,6 +607,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -572,6 +620,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -585,6 +634,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -597,6 +647,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -607,6 +658,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -619,6 +671,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -631,6 +684,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -644,6 +698,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -656,6 +711,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -666,6 +722,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -678,6 +735,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -690,6 +748,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -703,6 +762,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -715,6 +775,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -725,6 +786,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -737,6 +799,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -749,6 +812,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: 
vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -762,6 +826,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -774,6 +839,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -784,6 +850,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -796,6 +863,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -808,6 +876,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -821,6 +890,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -833,6 +903,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -843,6 +914,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -855,6 +927,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -867,6 +940,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -880,6 +954,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -892,6 +967,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -902,6 +978,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -914,6 +991,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -926,6 +1004,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -939,6 +1018,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -951,6 +1031,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -961,6 +1042,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -973,6 +1055,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -985,6 +1068,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -998,6 +1082,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1010,6 +1095,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -1020,6 +1106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1032,6 +1119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1044,6 +1132,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1057,6 +1146,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1069,6 +1159,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -1086,12 +1177,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1104,6 +1197,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1116,6 +1210,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1129,6 +1224,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1141,6 +1237,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -1158,12 +1255,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1176,6 +1275,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1188,6 +1288,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1201,6 +1302,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1213,6 +1315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -1230,12 +1333,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1248,6 +1353,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1260,6 +1366,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1273,6 +1380,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1285,6 +1393,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -1302,12 +1411,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1320,6 +1431,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, -10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -10, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1332,6 +1444,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1345,6 +1458,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -25,6 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -73,6 +78,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -101,6 +108,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -111,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -123,6 +132,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -135,6 +145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -149,6 +160,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -161,6 +173,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -177,6 +190,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -187,6 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -199,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -211,6 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -225,6 +242,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: 
vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -237,6 +255,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -253,6 +272,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -263,6 +283,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -275,6 +296,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -287,6 +309,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -301,6 +324,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -313,6 +337,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -329,6 +354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -339,6 +365,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -351,6 +378,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -363,6 +391,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -377,6 +406,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -389,6 +419,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -405,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -415,6 +447,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -427,6 +460,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -439,6 +473,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -453,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -465,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -481,6 +518,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -491,6 +529,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -503,6 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -515,6 +555,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -529,6 +570,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -541,6 +583,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -557,6 +600,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv1i16( %va, %b, 
%m, i32 %evl) ret %v @@ -567,6 +611,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -579,6 +624,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -591,6 +637,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -605,6 +652,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -617,6 +665,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -633,6 +682,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -643,6 +693,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -655,6 +706,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -667,6 +719,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -681,6 +734,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -693,6 +747,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -709,6 +764,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -719,6 +775,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -731,6 +788,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -743,6 +801,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -757,6 +816,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -769,6 +829,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -785,6 +846,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -795,6 +857,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -807,6 +870,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -819,6 +883,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -833,6 +898,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -845,6 +911,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -861,6 +928,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv14i16( %va, %b, %m, i32 %evl) ret %v @@ -871,6 +939,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -883,6 +952,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -895,6 +965,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -909,6 +980,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -921,6 +993,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -937,6 +1010,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -947,6 +1021,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -959,6 +1034,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -971,6 +1047,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -985,6 +1062,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -997,6 +1075,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1013,6 +1092,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -1023,6 +1103,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1035,6 +1116,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1047,6 +1129,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1061,6 +1144,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, 
m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1073,6 +1157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1089,6 +1174,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -1099,6 +1185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1111,6 +1198,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1123,6 +1211,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1137,6 +1226,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1149,6 +1239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1165,6 +1256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -1175,6 +1267,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1187,6 +1280,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1199,6 +1293,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1213,6 +1308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1225,6 +1321,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, 
mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1241,6 +1338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -1251,6 +1349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1263,6 +1362,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1275,6 +1375,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1289,6 +1390,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1301,6 +1403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1317,6 +1420,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -1327,6 +1431,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1339,6 +1444,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1351,6 +1457,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1365,6 +1472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1377,6 +1485,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1393,6 +1502,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: 
vand.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -1403,6 +1513,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1415,6 +1526,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1427,6 +1539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1441,6 +1554,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1453,6 +1567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1469,6 +1584,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -1479,6 +1595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1499,12 +1616,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vand.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1525,12 +1644,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1545,6 +1666,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1557,6 +1679,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, 
zeroinitializer @@ -1573,6 +1696,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1583,6 +1707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1603,12 +1728,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vand.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1629,12 +1756,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1649,6 +1778,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1661,6 +1791,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1677,6 +1808,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1687,6 +1819,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1707,12 +1840,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vand.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1733,12 +1868,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1753,6 +1890,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1765,6 +1903,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1781,6 +1920,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -1791,6 +1931,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1811,12 +1952,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vand.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1837,12 +1980,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1857,6 +2002,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1869,6 +2015,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -34,6 +36,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 2 ; CHECK-NEXT: vsrl.vi v9, v8, 7 ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -45,6 +48,7 @@ define @vdiv_vi_nxv1i8_1( %va) { ; CHECK-LABEL: 
vdiv_vi_nxv1i8_1: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -58,6 +62,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -70,6 +75,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -80,6 +86,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -97,6 +104,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 2 ; CHECK-NEXT: vsrl.vi v9, v8, 7 ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -109,6 +117,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -136,6 +146,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 2 ; CHECK-NEXT: vsrl.vi v9, v8, 7 ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -148,6 +159,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -158,6 +170,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -175,6 +188,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 2 ; CHECK-NEXT: vsrl.vi v9, v8, 7 ; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -187,6 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -197,6 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -214,6 +230,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 2 ; CHECK-NEXT: vsrl.vi v10, v8, 7 ; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -226,6 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -236,6 +254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -253,6 +272,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 2 ; CHECK-NEXT: vsrl.vi v12, v8, 7 ; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -265,6 +285,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -275,6 +296,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -292,6 +314,7 @@ ; CHECK-NEXT: vsra.vi v8, v8, 2 ; CHECK-NEXT: vsrl.vi v16, v8, 7 ; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -304,6 +327,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -314,6 +338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -331,6 +356,7 @@ ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v9, v8, 15 ; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv1i16_0: @@ -342,6 +368,7 @@ ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v9, v8, 15 ; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -354,6 +381,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -364,6 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -381,6 +410,7 @@ ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v9, v8, 15 ; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv2i16_0: @@ -392,6 +422,7 @@ ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v9, v8, 15 ; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -404,6 +435,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -414,6 +446,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -431,6 +464,7 @@ ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v9, v8, 15 ; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv4i16_0: @@ -442,6 +476,7 @@ ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v9, v8, 15 ; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -454,6 +489,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -464,6 +500,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -481,6 +518,7 @@ ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v10, v8, 15 ; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv8i16_0: @@ -492,6 +530,7 @@ ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v10, v8, 15 ; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -504,6 +543,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -514,6 +554,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -531,6 +572,7 @@ ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v12, v8, 15 ; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv16i16_0: @@ -542,6 +584,7 @@ ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v12, v8, 15 ; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -554,6 +597,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -564,6 +608,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -581,6 +626,7 @@ ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v16, v8, 15 ; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv32i16_0: @@ -592,6 +638,7 @@ ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v16, v8, 15 ; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -604,6 +651,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, 
v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -614,6 +662,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -632,6 +681,7 @@ ; RV32-NEXT: vsrl.vi v9, v8, 31 ; RV32-NEXT: vsra.vi v8, v8, 2 ; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv1i32_0: @@ -644,6 +694,7 @@ ; RV64-NEXT: vsra.vi v8, v8, 2 ; RV64-NEXT: vsrl.vi v9, v8, 31 ; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -656,6 +707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -666,6 +718,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -684,6 +737,7 @@ ; RV32-NEXT: vsrl.vi v9, v8, 31 ; RV32-NEXT: vsra.vi v8, v8, 2 ; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv2i32_0: @@ -696,6 +750,7 @@ ; RV64-NEXT: vsra.vi v8, v8, 2 ; RV64-NEXT: vsrl.vi v9, v8, 31 ; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -708,6 +763,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -718,6 +774,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -736,6 +793,7 @@ ; RV32-NEXT: vsrl.vi v10, v8, 31 ; RV32-NEXT: vsra.vi v8, v8, 2 ; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv4i32_0: @@ -748,6 +806,7 @@ ; RV64-NEXT: vsra.vi v8, v8, 2 ; RV64-NEXT: vsrl.vi v10, v8, 31 ; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -760,6 +819,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -770,6 +830,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -788,6 +849,7 @@ ; RV32-NEXT: vsrl.vi v12, v8, 31 ; RV32-NEXT: vsra.vi v8, v8, 2 ; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv8i32_0: @@ -800,6 +862,7 @@ ; RV64-NEXT: vsra.vi v8, v8, 2 ; RV64-NEXT: vsrl.vi v12, v8, 31 ; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret 
%head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -812,6 +875,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -822,6 +886,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -840,6 +905,7 @@ ; RV32-NEXT: vsrl.vi v16, v8, 31 ; RV32-NEXT: vsra.vi v8, v8, 2 ; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv16i32_0: @@ -852,6 +918,7 @@ ; RV64-NEXT: vsra.vi v8, v8, 2 ; RV64-NEXT: vsrl.vi v16, v8, 31 ; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -864,6 +931,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -881,12 +949,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -914,6 +984,7 @@ ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv1i64_0: @@ -932,6 +1003,7 @@ ; RV64-NEXT: vsrl.vx v9, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -944,6 +1016,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -961,12 +1034,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -994,6 +1069,7 @@ ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv2i64_0: @@ -1012,6 +1088,7 @@ ; RV64-NEXT: vsrl.vx v10, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1024,6 +1101,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -1041,12 +1119,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero 
; RV32-NEXT: vdiv.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1074,6 +1154,7 @@ ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv4i64_0: @@ -1092,6 +1173,7 @@ ; RV64-NEXT: vsrl.vx v12, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1104,6 +1186,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sdiv %va, %vb ret %vc @@ -1121,12 +1204,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1154,6 +1239,7 @@ ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vi_nxv8i64_0: @@ -1172,6 +1258,7 @@ ; RV64-NEXT: vsrl.vx v16, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll @@ -17,6 +17,7 @@ ; CHECK-NEXT: vsra.vi v9, v9, 1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -31,6 +32,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -41,6 +43,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -53,6 +56,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -65,6 +69,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -81,6 +86,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -91,6 +97,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -103,6 +110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -115,6 +123,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -131,6 +140,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv3i8( %va, %b, %m, i32 %evl) ret %v @@ -143,6 +153,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -153,6 +164,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -165,6 +177,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -177,6 +190,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -193,6 +207,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -203,6 +218,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -215,6 +231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -227,6 +244,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -243,6 +261,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -253,6 +272,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -265,6 +285,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -277,6 +298,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -293,6 +315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -303,6 +326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -315,6 +339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -327,6 +352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -343,6 +369,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -353,6 +380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -365,6 +393,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -377,6 +406,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -393,6 +423,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -403,6 +434,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -415,6 +447,7 
@@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -427,6 +460,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -443,6 +477,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -453,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -465,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -477,6 +514,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -493,6 +531,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -503,6 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -515,6 +555,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -527,6 +568,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -543,6 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -553,6 +596,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -565,6 +609,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -577,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, 
mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -593,6 +639,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -603,6 +650,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -615,6 +663,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -627,6 +676,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -643,6 +693,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -653,6 +704,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -665,6 +717,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -677,6 +730,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -693,6 +747,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -703,6 +758,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -715,6 +771,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -727,6 +784,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -743,6 +801,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -753,6 +812,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -765,6 +825,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -777,6 +838,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -793,6 +855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -803,6 +866,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -815,6 +879,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -827,6 +892,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -843,6 +909,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -853,6 +920,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -865,6 +933,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -877,6 +946,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -893,6 +963,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -903,6 +974,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, 
undef, zeroinitializer @@ -915,6 +987,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -927,6 +1000,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -943,6 +1017,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -953,6 +1028,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -973,12 +1049,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -999,12 +1077,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1021,6 +1101,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1031,6 +1112,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1051,12 +1133,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1077,12 +1161,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1099,6 +1185,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1109,6 +1196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1129,12 +1217,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1155,12 +1245,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1177,6 +1269,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sdiv.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -1187,6 +1280,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1207,12 +1301,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1233,12 +1329,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vdiv.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = 
shufflevector %head, undef, zeroinitializer @@ -31,6 +33,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -42,6 +45,7 @@ define @vdivu_vi_nxv1i8_1( %va) { ; CHECK-LABEL: vdivu_vi_nxv1i8_1: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -55,6 +59,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -67,6 +72,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -77,6 +83,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -91,6 +98,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -103,6 +111,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -113,6 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -127,6 +137,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -139,6 +150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -149,6 +161,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -163,6 +176,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -175,6 +189,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -185,6 +200,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, 
undef, zeroinitializer @@ -199,6 +215,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -211,6 +228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -221,6 +239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -235,6 +254,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -247,6 +267,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -257,6 +278,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -271,6 +293,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -283,6 +306,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -293,6 +317,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -308,6 +333,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv1i16_0: @@ -317,6 +343,7 @@ ; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -329,6 +356,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -339,6 +367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -354,6 +383,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv2i16_0: @@ -363,6 +393,7 @@ ; RV64-NEXT: vsetvli a1, zero, 
e16, mf2, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -375,6 +406,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -385,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -400,6 +433,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv4i16_0: @@ -409,6 +443,7 @@ ; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -421,6 +456,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -431,6 +467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -446,6 +483,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv8i16_0: @@ -455,6 +493,7 @@ ; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -467,6 +506,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -477,6 +517,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -492,6 +533,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv16i16_0: @@ -501,6 +543,7 @@ ; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -513,6 +556,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -523,6 +567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head 
= insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -538,6 +583,7 @@ ; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv32i16_0: @@ -547,6 +593,7 @@ ; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -559,6 +606,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -569,6 +617,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -584,6 +633,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 29 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv1i32_0: @@ -593,6 +643,7 @@ ; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 29 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -605,6 +656,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -615,6 +667,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -630,6 +683,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 29 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv2i32_0: @@ -639,6 +693,7 @@ ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 29 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -651,6 +706,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -661,6 +717,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -676,6 +733,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 29 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv4i32_0: @@ -685,6 +743,7 @@ ; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 29 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -697,6 +756,7 @@ 
; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -707,6 +767,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -722,6 +783,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 29 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv8i32_0: @@ -731,6 +793,7 @@ ; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 29 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -743,6 +806,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -753,6 +817,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -768,6 +833,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 29 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv16i32_0: @@ -777,6 +843,7 @@ ; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 29 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -789,6 +856,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -806,12 +874,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -835,6 +905,7 @@ ; RV32-NEXT: addi a0, zero, 61 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv1i64_0: @@ -846,6 +917,7 @@ ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 61 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -858,6 +930,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -872,6 +945,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vadd.vi v9, v9, 4 ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 
16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -885,6 +959,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -902,12 +977,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -931,6 +1008,7 @@ ; RV32-NEXT: addi a0, zero, 61 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv2i64_0: @@ -942,6 +1020,7 @@ ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 61 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -954,6 +1033,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -968,6 +1048,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vadd.vi v10, v10, 4 ; CHECK-NEXT: vsrl.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -981,6 +1062,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -998,12 +1080,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1027,6 +1111,7 @@ ; RV32-NEXT: addi a0, zero, 61 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv4i64_0: @@ -1038,6 +1123,7 @@ ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 61 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1050,6 +1136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1064,6 +1151,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vadd.vi v12, v12, 4 ; CHECK-NEXT: vsrl.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1077,6 +1165,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vdivu.vv 
v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = udiv %va, %vb ret %vc @@ -1094,12 +1183,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1123,6 +1214,7 @@ ; RV32-NEXT: addi a0, zero, 61 ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vi_nxv8i64_0: @@ -1134,6 +1226,7 @@ ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: addi a0, zero, 61 ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1146,6 +1239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1160,6 +1254,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vadd.vi v16, v16, 4 ; CHECK-NEXT: vsrl.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: vand.vx v9, v9, a2 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -30,6 +31,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -40,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -52,6 +55,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -64,6 +68,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -80,6 +85,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -90,6 +96,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -102,6 +109,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -114,6 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -130,6 +139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv3i8( %va, %b, %m, i32 %evl) ret %v @@ -142,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -152,6 +163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -164,6 +176,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -176,6 +189,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -192,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -202,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -214,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -226,6 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -242,6 +260,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -252,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -264,6 +284,7 @@ ; CHECK: 
# %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -276,6 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -292,6 +314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -302,6 +325,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -314,6 +338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -326,6 +351,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -342,6 +368,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -352,6 +379,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -364,6 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -376,6 +405,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -392,6 +422,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -402,6 +433,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -414,6 +446,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -426,6 +459,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: 
vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -442,6 +476,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -452,6 +487,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -464,6 +500,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -476,6 +513,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -492,6 +530,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -502,6 +541,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -514,6 +554,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -526,6 +567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -542,6 +584,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -552,6 +595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -564,6 +608,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -576,6 +621,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -592,6 +638,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -602,6 +649,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -614,6 +662,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -626,6 +675,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -642,6 +692,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -652,6 +703,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -664,6 +716,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -676,6 +729,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -692,6 +746,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -702,6 +757,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -714,6 +770,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -726,6 +783,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -742,6 +800,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -752,6 +811,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, 
zeroinitializer @@ -764,6 +824,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -776,6 +837,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -792,6 +854,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -802,6 +865,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -814,6 +878,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -826,6 +891,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -842,6 +908,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -852,6 +919,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -864,6 +932,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -876,6 +945,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -892,6 +962,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -902,6 +973,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -914,6 +986,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -926,6 +999,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -942,6 +1016,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -952,6 +1027,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -972,12 +1048,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -998,12 +1076,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1020,6 +1100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1030,6 +1111,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1050,12 +1132,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1076,12 +1160,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1098,6 +1184,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1108,6 +1195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vdivu.vv 
v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1128,12 +1216,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1154,12 +1244,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1176,6 +1268,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.udiv.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -1186,6 +1279,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vdivu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1206,12 +1300,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1232,12 +1328,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vdivu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll @@ -8,6 +8,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -19,6 +20,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -30,6 +32,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -41,6 +44,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vzext.vf4 v9, v8 ; 
CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -52,6 +56,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf8 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -63,6 +68,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf8 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -74,6 +80,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -85,6 +92,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -96,6 +104,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -107,6 +116,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -118,6 +128,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf8 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -129,6 +140,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf8 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -140,6 +152,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -151,6 +164,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -162,6 +176,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -173,6 +188,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -184,6 +200,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf8 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -195,6 +212,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf8 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -206,6 +224,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -217,6 +236,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -228,6 +248,7 @@ ; 
CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -239,6 +260,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vzext.vf4 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -250,6 +272,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf8 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -261,6 +284,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf8 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -272,6 +296,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -283,6 +308,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -294,6 +320,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -305,6 +332,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -316,6 +344,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -327,6 +356,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -338,6 +368,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -349,6 +380,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -360,6 +392,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -371,6 +404,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -382,6 +416,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -393,6 +428,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -404,6 +440,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -415,6 +452,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -426,6 +464,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -437,6 +476,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -448,6 +488,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -459,6 +500,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf4 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -470,6 +512,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -481,6 +524,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -492,6 +536,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -503,6 +548,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -514,6 +560,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -525,6 +572,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -536,6 +584,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -547,6 +596,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -558,6 +608,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -569,6 +620,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -580,6 +632,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -591,6 +644,7 @@ ; CHECK-NEXT: vsetvli a0, 
zero, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -602,6 +656,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -613,6 +668,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = zext %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv1f16( %v) ret %r @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv2f16( %v) ret %r @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv4f16( %v) ret %r @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv8f16( %v) ret %r @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv16f16( %v) ret %r @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv32f16( %v) ret %r @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv1f32( %v) ret %r @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv2f32( %v) ret %r @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv4f32( %v) ret %r @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv8f32( %v) ret %r @@ -131,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv16f32( %v) ret %r @@ -143,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv1f64( %v) ret %r @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv2f64( %v) ret %r @@ -167,6 +180,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv4f64( %v) ret %r @@ -179,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjx.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv8f64( %v) ret %r diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -31,6 +33,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -41,6 +44,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -53,6 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -63,6 +68,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -75,6 +81,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -85,6 +92,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -97,6 +105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -109,6 +118,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -119,6 +129,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -131,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, 
%vb ret %vc @@ -141,6 +153,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -153,6 +166,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -163,6 +177,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -175,6 +190,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -185,6 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -207,6 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -219,6 +238,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -229,6 +249,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -241,6 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -253,6 +275,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -263,6 +286,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -275,6 +299,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -285,6 +310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -297,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -307,6 +334,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -319,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -329,6 +358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -341,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fadd %va, %vb ret %vc @@ -351,6 +382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -363,6 +395,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv1f16( %va, %b, %m, i32 %evl) ret %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -35,6 +37,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -49,6 +52,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -65,6 +69,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv2f16( %va, %b, %m, i32 %evl) ret %v @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -89,6 +95,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 
; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -103,6 +110,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -119,6 +127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv4f16( %va, %b, %m, i32 %evl) ret %v @@ -129,6 +138,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -143,6 +153,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -157,6 +168,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -173,6 +185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv8f16( %va, %b, %m, i32 %evl) ret %v @@ -183,6 +196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -197,6 +211,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -211,6 +226,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -227,6 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv16f16( %va, %b, %m, i32 %evl) ret %v @@ -237,6 +254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -251,6 +269,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -265,6 +284,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, 
v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -281,6 +301,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv32f16( %va, %b, %m, i32 %evl) ret %v @@ -291,6 +312,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -305,6 +327,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -319,6 +342,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -335,6 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv1f32( %va, %b, %m, i32 %evl) ret %v @@ -345,6 +370,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -359,6 +385,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -373,6 +400,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -389,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv2f32( %va, %b, %m, i32 %evl) ret %v @@ -399,6 +428,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -413,6 +443,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -427,6 +458,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -443,6 +475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, 
ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv4f32( %va, %b, %m, i32 %evl) ret %v @@ -453,6 +486,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -467,6 +501,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -481,6 +516,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -497,6 +533,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv8f32( %va, %b, %m, i32 %evl) ret %v @@ -507,6 +544,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -521,6 +559,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -535,6 +574,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -551,6 +591,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv16f32( %va, %b, %m, i32 %evl) ret %v @@ -561,6 +602,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -575,6 +617,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -589,6 +632,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -605,6 +649,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv1f64( %va, %b, %m, i32 %evl) ret %v @@ -615,6 +660,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, 
e64, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -629,6 +675,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -643,6 +690,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -659,6 +707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv2f64( %va, %b, %m, i32 %evl) ret %v @@ -669,6 +718,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -683,6 +733,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -697,6 +748,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -713,6 +765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv4f64( %va, %b, %m, i32 %evl) ret %v @@ -723,6 +776,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -737,6 +791,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -751,6 +806,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -767,6 +823,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv7f64( %va, %b, %m, i32 %evl) ret %v @@ -779,6 +836,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fadd.nxv8f64( %va, %b, %m, i32 %evl) ret %v @@ -789,6 +847,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli 
zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -803,6 +862,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -817,6 +877,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv1f16( %vm, %vs) ret %r @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv1f16( %vm, %n) @@ -44,6 +47,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -58,6 +62,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fptrunc %vs to %r = call @llvm.copysign.nxv1f16( %vm, %e) @@ -72,6 +77,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -86,6 +92,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fptrunc %n to @@ -101,6 +108,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -118,6 +126,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v9, v10 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fptrunc %vs to %r = call @llvm.copysign.nxv1f16( %vm, %e) @@ -134,6 +143,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v9, v10 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -150,6 +160,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v9, v10 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fptrunc %n to @@ -167,6 +178,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v9, v10 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -183,6 +195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv2f16( %vm, %vs) ret %r @@ -193,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -205,6 +219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv2f16( %vm, %n) @@ -216,6 +231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -231,6 +247,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv4f16( %vm, %vs) ret %r @@ -241,6 +258,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -253,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv4f16( %vm, %n) @@ -264,6 +283,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -279,6 +299,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv8f16( %vm, %vs) ret %r @@ -289,6 +310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -301,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv8f16( %vm, %n) @@ -312,6 +335,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -326,6 +350,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fptrunc %vs to %r = call @llvm.copysign.nxv8f16( %vm, %e) @@ -340,6 +365,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -354,6 +380,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fptrunc %n to @@ -369,6 +396,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -386,6 +414,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fptrunc %vs to %r = call @llvm.copysign.nxv8f16( %vm, %e) @@ -402,6 +431,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -418,6 +448,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fptrunc %n to @@ -435,6 +466,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -451,6 +483,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv16f16( %vm, %vs) ret %r @@ -461,6 +494,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -473,6 +507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv16f16( %vm, %n) @@ -484,6 +519,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -499,6 +535,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv32f16( %vm, %vs) ret %r @@ 
-509,6 +546,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -521,6 +559,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv32f16( %vm, %n) @@ -532,6 +571,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -547,6 +587,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv1f32( %vm, %vs) ret %r @@ -557,6 +598,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -569,6 +611,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv1f32( %vm, %n) @@ -580,6 +623,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -595,6 +639,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fpext %vs to %r = call @llvm.copysign.nxv1f32( %vm, %e) @@ -609,6 +654,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -624,6 +670,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fpext %n to @@ -639,6 +686,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -654,6 +702,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fptrunc %vs to %r = call @llvm.copysign.nxv1f32( %vm, %e) @@ -668,6 +717,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -682,6 +732,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: 
vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fptrunc %n to @@ -697,6 +748,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -713,6 +765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv2f32( %vm, %vs) ret %r @@ -723,6 +776,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -735,6 +789,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv2f32( %vm, %n) @@ -746,6 +801,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -761,6 +817,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv4f32( %vm, %vs) ret %r @@ -771,6 +828,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -783,6 +841,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv4f32( %vm, %n) @@ -794,6 +853,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -809,6 +869,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv8f32( %vm, %vs) ret %r @@ -819,6 +880,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -831,6 +893,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv8f32( %vm, %n) @@ -842,6 +905,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -857,6 +921,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v16, v12 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, 
mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fpext %vs to %r = call @llvm.copysign.nxv8f32( %vm, %e) @@ -871,6 +936,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v16, v12 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -886,6 +952,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v16, v12 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fpext %n to @@ -901,6 +968,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v16, v12 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -916,6 +984,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v16 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fptrunc %vs to %r = call @llvm.copysign.nxv8f32( %vm, %e) @@ -930,6 +999,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v16 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -944,6 +1014,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v16 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fptrunc %n to @@ -959,6 +1030,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v16 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -975,6 +1047,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv16f32( %vm, %vs) ret %r @@ -985,6 +1058,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -997,6 +1071,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv16f32( %vm, %n) @@ -1008,6 +1083,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1023,6 +1099,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv1f64( %vm, %vs) ret %r @@ -1033,6 +1110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = 
shufflevector %head, undef, zeroinitializer @@ -1045,6 +1123,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv1f64( %vm, %n) @@ -1056,6 +1135,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1073,6 +1153,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v9, v10 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fpext %vs to %r = call @llvm.copysign.nxv1f64( %vm, %e) @@ -1089,6 +1170,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v9, v10 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1106,6 +1188,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v9, v10 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fpext %n to @@ -1123,6 +1206,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v9, v10 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1139,6 +1223,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fpext %vs to %r = call @llvm.copysign.nxv1f64( %vm, %e) @@ -1153,6 +1238,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1168,6 +1254,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fpext %n to @@ -1183,6 +1270,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1199,6 +1287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv2f64( %vm, %vs) ret %r @@ -1209,6 +1298,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1221,6 +1311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv2f64( %vm, %n) @@ -1232,6 +1323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, 
mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1247,6 +1339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv4f64( %vm, %vs) ret %r @@ -1257,6 +1350,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1269,6 +1363,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv4f64( %vm, %n) @@ -1280,6 +1375,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1295,6 +1391,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv8f64( %vm, %vs) ret %r @@ -1305,6 +1402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1317,6 +1415,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %r = call @llvm.copysign.nxv8f64( %vm, %n) @@ -1328,6 +1427,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1345,6 +1445,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v24, v20 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fpext %vs to %r = call @llvm.copysign.nxv8f64( %vm, %e) @@ -1361,6 +1462,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v24, v20 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1378,6 +1480,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v24, v20 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fpext %n to @@ -1395,6 +1498,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v24, v20 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1411,6 +1515,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v24, v16 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %e = fpext %vs to %r = 
call @llvm.copysign.nxv8f64( %vm, %e) @@ -1425,6 +1530,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v24, v16 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v8, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1440,6 +1546,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v24, v16 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %n = fneg %vs %eneg = fpext %n to @@ -1455,6 +1562,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v24, v16 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %s, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -31,6 +33,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -41,6 +44,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -53,6 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -63,6 +68,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -75,6 +81,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -85,6 +92,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -97,6 +105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -109,6 +118,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -119,6 +129,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, 
fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -131,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -141,6 +153,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -153,6 +166,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -163,6 +177,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -175,6 +190,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -185,6 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -207,6 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -219,6 +238,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -229,6 +249,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -241,6 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -253,6 +275,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -263,6 +286,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -275,6 +299,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -285,6 +310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -297,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -307,6 +334,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -319,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -329,6 +358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -341,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fdiv %va, %vb ret %vc @@ -351,6 +382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -363,6 +395,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv1f16( %va, %b, %m, i32 %evl) ret %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -35,6 +37,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -49,6 +52,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -65,6 +69,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv2f16( %va, %b, %m, i32 %evl) ret %v @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; 
CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -89,6 +95,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -103,6 +110,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -119,6 +127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv4f16( %va, %b, %m, i32 %evl) ret %v @@ -129,6 +138,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -143,6 +153,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -157,6 +168,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -173,6 +185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv8f16( %va, %b, %m, i32 %evl) ret %v @@ -183,6 +196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -197,6 +211,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -211,6 +226,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -227,6 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv16f16( %va, %b, %m, i32 %evl) ret %v @@ -237,6 +254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -251,6 +269,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: 
vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -265,6 +284,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -281,6 +301,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv32f16( %va, %b, %m, i32 %evl) ret %v @@ -291,6 +312,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -305,6 +327,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -319,6 +342,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -335,6 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv1f32( %va, %b, %m, i32 %evl) ret %v @@ -345,6 +370,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -359,6 +385,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -373,6 +400,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -389,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv2f32( %va, %b, %m, i32 %evl) ret %v @@ -399,6 +428,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -413,6 +443,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -427,6 
+458,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -443,6 +475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv4f32( %va, %b, %m, i32 %evl) ret %v @@ -453,6 +486,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -467,6 +501,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -481,6 +516,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -497,6 +533,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv8f32( %va, %b, %m, i32 %evl) ret %v @@ -507,6 +544,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -521,6 +559,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -535,6 +574,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -551,6 +591,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv16f32( %va, %b, %m, i32 %evl) ret %v @@ -561,6 +602,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -575,6 +617,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -589,6 +632,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = 
shufflevector %elt.head, undef, zeroinitializer @@ -605,6 +649,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv1f64( %va, %b, %m, i32 %evl) ret %v @@ -615,6 +660,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -629,6 +675,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -643,6 +690,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -659,6 +707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv2f64( %va, %b, %m, i32 %evl) ret %v @@ -669,6 +718,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -683,6 +733,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -697,6 +748,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -713,6 +765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv4f64( %va, %b, %m, i32 %evl) ret %v @@ -723,6 +776,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -737,6 +791,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -751,6 +806,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -767,6 +823,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = 
call @llvm.vp.fdiv.nxv7f64( %va, %b, %m, i32 %evl) ret %v @@ -779,6 +836,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fdiv.nxv8f64( %va, %b, %m, i32 %evl) ret %v @@ -789,6 +847,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -803,6 +862,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -817,6 +877,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll @@ -14,6 +14,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v1f16( %va, %vb, %vc) ret %vd @@ -24,6 +25,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -38,6 +40,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmadd.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v2f16( %va, %vc, %vb) ret %vd @@ -48,6 +51,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmacc.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -62,6 +66,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v4f16( %vb, %va, %vc) ret %vd @@ -72,6 +77,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -86,6 +92,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmacc.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v8f16( %vb, %vc, %va) ret %vd @@ -96,6 +103,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmacc.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -110,6 +118,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmadd.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd 
= call @llvm.fma.v16f16( %vc, %va, %vb) ret %vd @@ -120,6 +129,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -135,6 +145,7 @@ ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmacc.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v32f16( %vc, %vb, %va) ret %vd @@ -145,6 +156,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmacc.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v1f32( %va, %vb, %vc) ret %vd @@ -169,6 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -183,6 +197,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmadd.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v2f32( %va, %vc, %vb) ret %vd @@ -193,6 +208,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmacc.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -207,6 +223,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmadd.vv v8, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v4f32( %vb, %va, %vc) ret %vd @@ -217,6 +234,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -231,6 +249,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmacc.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v8f32( %vb, %vc, %va) ret %vd @@ -241,6 +260,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmacc.vf v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -256,6 +276,7 @@ ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmadd.vv v8, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v16f32( %vc, %va, %vb) ret %vd @@ -266,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -280,6 +302,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %vd = call @llvm.fma.v1f64( %va, %vb, %vc) ret %vd @@ -290,6 +313,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -304,6 +328,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmadd.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v2f64( %va, %vc, %vb) ret %vd @@ -314,6 +339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmacc.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -328,6 +354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmadd.vv v8, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v4f64( %vb, %va, %vc) ret %vd @@ -338,6 +365,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -353,6 +381,7 @@ ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmacc.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vd = call @llvm.fma.v8f64( %vb, %vc, %va) ret %vd @@ -363,6 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmacc.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv1f16( %a, %b) ret %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv2f16( %a, %b) ret %v @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv4f16( %a, %b) ret %v @@ -69,6 +74,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, 
zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv8f16( %a, %b) ret %v @@ -93,6 +100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv16f16( %a, %b) ret %v @@ -117,6 +126,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -131,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv32f16( %a, %b) ret %v @@ -141,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv1f32( %a, %b) ret %v @@ -165,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -179,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv2f32( %a, %b) ret %v @@ -189,6 +204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -203,6 +219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv4f32( %a, %b) ret %v @@ -213,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv8f32( %a, %b) ret %v @@ -237,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -251,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, 
zero, e32, m8, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv16f32( %a, %b) ret %v @@ -261,6 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -275,6 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv1f64( %a, %b) ret %v @@ -285,6 +308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -299,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv2f64( %a, %b) ret %v @@ -309,6 +334,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -323,6 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv4f64( %a, %b) ret %v @@ -333,6 +360,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -347,6 +375,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmax.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv8f64( %a, %b) ret %v @@ -357,6 +386,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv1f16( %a, %b) ret %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv2f16( %a, %b) ret %v @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, 
i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv4f16( %a, %b) ret %v @@ -69,6 +74,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv8f16( %a, %b) ret %v @@ -93,6 +100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv16f16( %a, %b) ret %v @@ -117,6 +126,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -131,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv32f16( %a, %b) ret %v @@ -141,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv1f32( %a, %b) ret %v @@ -165,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -179,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv2f32( %a, %b) ret %v @@ -189,6 +204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -203,6 +219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv4f32( %a, %b) ret %v @@ -213,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -227,6 +245,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv8f32( %a, %b) ret %v @@ -237,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -251,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv16f32( %a, %b) ret %v @@ -261,6 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -275,6 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv1f64( %a, %b) ret %v @@ -285,6 +308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -299,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv2f64( %a, %b) ret %v @@ -309,6 +334,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -323,6 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv4f64( %a, %b) ret %v @@ -333,6 +360,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -347,6 +375,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmin.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv8f64( %a, %b) ret %v @@ -357,6 +386,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll @@ -14,6 +14,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %vd = call @llvm.fma.v1f16( %va, %vb, %neg) @@ -25,6 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -40,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmsub.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %vd = call @llvm.fma.v2f16( %va, %vc, %neg) @@ -51,6 +54,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmsac.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -66,6 +70,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %vd = call @llvm.fma.v4f16( %vb, %va, %neg) @@ -77,6 +82,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -92,6 +98,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmsac.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %vd = call @llvm.fma.v8f16( %vb, %vc, %neg) @@ -103,6 +110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmsac.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -118,6 +126,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmsub.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %vd = call @llvm.fma.v16f16( %vc, %va, %neg) @@ -129,6 +138,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -145,6 +155,7 @@ ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmsac.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %vd = call @llvm.fma.v32f16( %vc, %vb, %neg) @@ -156,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmsac.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -171,6 +183,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %vd = call @llvm.fma.v1f32( %va, %vb, %neg) @@ -182,6 +195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +211,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmsub.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %vd = call @llvm.fma.v2f32( %va, %vc, %neg) @@ -208,6 +223,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; 
CHECK-NEXT: vfmsac.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -223,6 +239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmsub.vv v8, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %vd = call @llvm.fma.v4f32( %vb, %va, %neg) @@ -234,6 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -249,6 +267,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmsac.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %vd = call @llvm.fma.v8f32( %vb, %vc, %neg) @@ -260,6 +279,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmsac.vf v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -276,6 +296,7 @@ ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmsub.vv v8, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %vd = call @llvm.fma.v16f32( %vc, %va, %neg) @@ -287,6 +308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -302,6 +324,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %vd = call @llvm.fma.v1f64( %va, %vb, %neg) @@ -313,6 +336,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -328,6 +352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmsub.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %vd = call @llvm.fma.v2f64( %va, %vc, %neg) @@ -339,6 +364,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmsac.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -354,6 +380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmsub.vv v8, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %vd = call @llvm.fma.v4f64( %vb, %va, %neg) @@ -365,6 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -381,6 +409,7 @@ ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmsac.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %vd = call @llvm.fma.v8f64( %vb, %vc, %neg) @@ -392,6 
+421,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmsac.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -31,6 +33,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -41,6 +44,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -53,6 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -63,6 +68,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -75,6 +81,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -85,6 +92,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -97,6 +105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -109,6 +118,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -119,6 +129,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -131,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -141,6 +153,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = 
shufflevector %head, undef, zeroinitializer @@ -153,6 +166,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -163,6 +177,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -175,6 +190,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -185,6 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -207,6 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -219,6 +238,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -229,6 +249,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -241,6 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -253,6 +275,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -263,6 +286,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -275,6 +299,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -285,6 +310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -297,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -307,6 +334,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -319,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -329,6 +358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -341,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fmul %va, %vb ret %vc @@ -351,6 +382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -363,6 +395,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv1f16( %va, %b, %m, i32 %evl) ret %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -35,6 +37,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -49,6 +52,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -65,6 +69,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv2f16( %va, %b, %m, i32 %evl) ret %v @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -89,6 +95,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -103,6 +110,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: 
vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -119,6 +127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv4f16( %va, %b, %m, i32 %evl) ret %v @@ -129,6 +138,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -143,6 +153,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -157,6 +168,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -173,6 +185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv8f16( %va, %b, %m, i32 %evl) ret %v @@ -183,6 +196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -197,6 +211,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -211,6 +226,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -227,6 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv16f16( %va, %b, %m, i32 %evl) ret %v @@ -237,6 +254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -251,6 +269,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -265,6 +284,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -281,6 +301,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, 
e16, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv32f16( %va, %b, %m, i32 %evl) ret %v @@ -291,6 +312,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -305,6 +327,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -319,6 +342,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -335,6 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv1f32( %va, %b, %m, i32 %evl) ret %v @@ -345,6 +370,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -359,6 +385,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -373,6 +400,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -389,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv2f32( %va, %b, %m, i32 %evl) ret %v @@ -399,6 +428,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -413,6 +443,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -427,6 +458,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -443,6 +475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv4f32( %va, %b, %m, i32 %evl) ret %v @@ -453,6 +486,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, 
e32, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -467,6 +501,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -481,6 +516,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -497,6 +533,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv8f32( %va, %b, %m, i32 %evl) ret %v @@ -507,6 +544,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -521,6 +559,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -535,6 +574,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -551,6 +591,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv16f32( %va, %b, %m, i32 %evl) ret %v @@ -561,6 +602,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -575,6 +617,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -589,6 +632,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -605,6 +649,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv1f64( %va, %b, %m, i32 %evl) ret %v @@ -615,6 +660,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -629,6 +675,7 @@ ; CHECK-NEXT: 
vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -643,6 +690,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -659,6 +707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv2f64( %va, %b, %m, i32 %evl) ret %v @@ -669,6 +718,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -683,6 +733,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -697,6 +748,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -713,6 +765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv4f64( %va, %b, %m, i32 %evl) ret %v @@ -723,6 +776,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -737,6 +791,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -751,6 +806,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -767,6 +823,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv7f64( %va, %b, %m, i32 %evl) ret %v @@ -779,6 +836,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fmul.nxv8f64( %va, %b, %m, i32 %evl) ret %v @@ -789,6 +847,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -803,6 +862,7 @@ ; 
CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -817,6 +877,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -29,6 +31,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -39,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -49,6 +53,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -59,6 +64,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -69,6 +75,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -79,6 +86,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -89,6 +97,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -99,6 +108,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -109,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -119,6 +130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -129,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb @@ -139,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret 
%vb @@ -149,6 +163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnjn.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vb = fneg %va ret %vb diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll @@ -14,6 +14,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %neg2 = fneg %vc @@ -26,6 +27,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -42,6 +44,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfnmadd.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %neg2 = fneg %vb @@ -54,6 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -70,6 +74,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %neg2 = fneg %vc @@ -82,6 +87,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -98,6 +104,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfnmacc.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %neg2 = fneg %va @@ -110,6 +117,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -126,6 +134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfnmadd.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %neg2 = fneg %vb @@ -138,6 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -155,6 +165,7 @@ ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfnmadd.vv v8, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %neg2 = fneg %vb @@ -167,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfnmacc.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -183,6 +195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %neg2 = fneg 
%vc @@ -195,6 +208,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -211,6 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfnmadd.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %neg2 = fneg %vb @@ -223,6 +238,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -239,6 +255,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %neg2 = fneg %vc @@ -251,6 +268,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -267,6 +285,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfnmacc.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %neg2 = fneg %va @@ -279,6 +298,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -296,6 +316,7 @@ ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfnmadd.vv v8, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %neg2 = fneg %vb @@ -308,6 +329,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -324,6 +346,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfnmacc.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %neg2 = fneg %va @@ -336,6 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -352,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfnmadd.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %neg2 = fneg %vb @@ -364,6 +389,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -380,6 +406,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %neg2 = fneg %vc @@ -392,6 +419,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfnmadd.vf 
v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -409,6 +437,7 @@ ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfnmacc.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %neg2 = fneg %va @@ -421,6 +450,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfnmacc.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll @@ -14,6 +14,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %vd = call @llvm.fma.v1f16( %neg, %vb, %vc) @@ -25,6 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -40,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfnmsub.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %vd = call @llvm.fma.v2f16( %neg, %vc, %vb) @@ -51,6 +54,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -66,6 +70,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %vd = call @llvm.fma.v4f16( %neg, %va, %vc) @@ -77,6 +82,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -92,6 +98,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfnmsac.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %vd = call @llvm.fma.v8f16( %neg, %vc, %va) @@ -103,6 +110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -118,6 +126,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfnmsub.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %vd = call @llvm.fma.v16f16( %neg, %va, %vb) @@ -129,6 +138,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -145,6 +155,7 @@ ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; 
CHECK-NEXT: vfnmsub.vv v8, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %vd = call @llvm.fma.v32f16( %neg, %va, %vb) @@ -156,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfnmsac.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -171,6 +183,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %vd = call @llvm.fma.v1f32( %va, %neg, %vc) @@ -182,6 +195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +211,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfnmsub.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %vd = call @llvm.fma.v2f32( %va, %neg, %vb) @@ -208,6 +223,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -223,6 +239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfnmsub.vv v8, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %vd = call @llvm.fma.v4f32( %vb, %neg, %vc) @@ -234,6 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -249,6 +267,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfnmsac.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vc %vd = call @llvm.fma.v8f32( %vb, %neg, %va) @@ -260,6 +279,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -276,6 +296,7 @@ ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfnmsub.vv v8, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %vd = call @llvm.fma.v16f32( %vc, %neg, %vb) @@ -287,6 +308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -302,6 +324,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfnmsac.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %vd = call @llvm.fma.v1f64( %vc, %neg, %va) @@ -313,6 +336,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -328,6 +352,7 
@@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfnmsub.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %va %vd = call @llvm.fma.v2f64( %neg, %vc, %vb) @@ -339,6 +364,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -354,6 +380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfnmsub.vv v8, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %vd = call @llvm.fma.v4f64( %neg, %va, %vc) @@ -365,6 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -381,6 +409,7 @@ ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfnmsac.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %neg = fneg %vb %vd = call @llvm.fma.v8f64( %neg, %vc, %va) @@ -392,6 +421,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfnmsac.vf v8, fa0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll @@ -11,6 +11,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v9, v8 ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv1f16_nxv1f32: @@ -18,6 +19,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v9, v8 ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -31,6 +33,7 @@ ; RV32-NEXT: vfwcvt.f.f.v v9, v8 ; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv1f16_nxv1f64: @@ -39,6 +42,7 @@ ; RV64-NEXT: vfwcvt.f.f.v v9, v8 ; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -51,6 +55,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v9, v8 ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv2f16_nxv2f32: @@ -58,6 +63,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v9, v8 ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -71,6 +77,7 @@ ; RV32-NEXT: vfwcvt.f.f.v v10, v8 ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv2f16_nxv2f64: @@ -79,6 +86,7 @@ ; RV64-NEXT: vfwcvt.f.f.v v10, v8 ; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -91,6 +99,7 @@ ; RV32-NEXT: vsetvli a0, 
zero, e16, m1, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v10, v8 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv4f16_nxv4f32: @@ -98,6 +107,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v10, v8 ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -111,6 +121,7 @@ ; RV32-NEXT: vfwcvt.f.f.v v12, v8 ; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv4f16_nxv4f64: @@ -119,6 +130,7 @@ ; RV64-NEXT: vfwcvt.f.f.v v12, v8 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -131,6 +143,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v12, v8 ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv8f16_nxv8f32: @@ -138,6 +151,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v12, v8 ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -151,6 +165,7 @@ ; RV32-NEXT: vfwcvt.f.f.v v16, v8 ; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv8f16_nxv8f64: @@ -159,6 +174,7 @@ ; RV64-NEXT: vfwcvt.f.f.v v16, v8 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -171,6 +187,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v16, v8 ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv16f16_nxv16f32: @@ -178,6 +195,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v16, v8 ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -190,6 +208,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v9, v8 ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv1f32_nxv1f64: @@ -197,6 +216,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v9, v8 ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -209,6 +229,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v10, v8 ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv2f32_nxv2f64: @@ -216,6 +237,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v10, v8 ; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -228,6 +250,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vfwcvt.f.f.v v12, v8 ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv4f32_nxv4f64: @@ -235,6 +258,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v12, v8 ; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec @@ -247,6 +271,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; 
RV32-NEXT: vfwcvt.f.f.v v16, v8 ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfpext_nxv8f32_nxv8f64: @@ -254,6 +279,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64-NEXT: vfwcvt.f.f.v v16, v8 ; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fpext %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -23,6 +24,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -34,6 +36,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -45,6 +48,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -55,6 +59,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -65,6 +70,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -76,6 +82,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -87,6 +94,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -99,6 +107,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -111,6 +120,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -123,6 +133,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -135,6 +146,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -146,6 +158,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -157,6 +170,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -167,6 +181,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -177,6 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -188,6 +204,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -199,6 +216,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -211,6 +229,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -223,6 +242,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -235,6 +255,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -247,6 +268,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -258,6 +280,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -269,6 +292,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -279,6 +303,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -289,6 +314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -300,6 +326,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -311,6 +338,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -323,6 +351,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -335,6 +364,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 
; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -347,6 +377,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -359,6 +390,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -370,6 +402,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -381,6 +414,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -391,6 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -401,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -412,6 +448,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -423,6 +460,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -435,6 +473,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -447,6 +486,7 @@ ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -459,6 +499,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -471,6 +512,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -482,6 +524,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -493,6 +536,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -503,6 +547,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -513,6 +558,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ 
-524,6 +570,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -535,6 +582,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -547,6 +595,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -559,6 +608,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -570,6 +620,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -581,6 +632,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -591,6 +643,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -601,6 +654,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -613,6 +667,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -625,6 +680,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -637,6 +693,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -649,6 +706,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -660,6 +718,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -671,6 +730,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -681,6 +741,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -691,6 +752,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -702,6 +764,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, 
mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -713,6 +776,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -725,6 +789,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -737,6 +802,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -749,6 +815,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -761,6 +828,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -772,6 +840,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -783,6 +852,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -793,6 +863,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -803,6 +874,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -814,6 +886,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -825,6 +898,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -837,6 +911,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -849,6 +924,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -861,6 +937,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -873,6 +950,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -884,6 +962,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: 
vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -895,6 +974,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -905,6 +985,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -915,6 +996,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -926,6 +1008,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -937,6 +1020,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -949,6 +1033,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -961,6 +1046,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -973,6 +1059,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -985,6 +1072,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -996,6 +1084,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1007,6 +1096,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1017,6 +1107,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1027,6 +1118,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1038,6 +1130,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1049,6 +1142,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1061,6 +1155,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vand.vi v8, v16, 1 ; 
CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1073,6 +1168,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1085,6 +1181,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1097,6 +1194,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1108,6 +1206,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1119,6 +1218,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1129,6 +1229,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1139,6 +1240,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1151,6 +1253,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1163,6 +1266,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1177,6 +1281,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1191,6 +1296,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1203,6 +1309,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1215,6 +1322,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v9, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1226,6 +1334,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1237,6 +1346,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1247,6 +1357,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; 
CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1257,6 +1368,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1269,6 +1381,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1281,6 +1394,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1295,6 +1409,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1309,6 +1424,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1321,6 +1437,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1333,6 +1450,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1344,6 +1462,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1355,6 +1474,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1365,6 +1485,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1375,6 +1496,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1387,6 +1509,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1399,6 +1522,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1413,6 +1537,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1427,6 +1552,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1439,6 +1565,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; 
CHECK-NEXT: vnsrl.wi v8, v12, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1451,6 +1578,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1462,6 +1590,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1473,6 +1602,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1483,6 +1613,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1493,6 +1624,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1505,6 +1637,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1517,6 +1650,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1531,6 +1665,7 @@ ; CHECK-NEXT: vnsrl.wi v10, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1545,6 +1680,7 @@ ; CHECK-NEXT: vnsrl.wi v10, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1557,6 +1693,7 @@ ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1569,6 +1706,7 @@ ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1580,6 +1718,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1591,6 +1730,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1601,6 +1741,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1611,6 +1752,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll @@ -11,6 +11,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV32-NEXT: vfncvt.f.f.w v9, v8 ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv1f32_nxv1f16: @@ -18,6 +19,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; RV64-NEXT: vfncvt.f.f.w v9, v8 ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -30,6 +32,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vfncvt.f.f.w v9, v8 ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv2f32_nxv2f16: @@ -37,6 +40,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vfncvt.f.f.w v9, v8 ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -49,6 +53,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vfncvt.f.f.w v10, v8 ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv4f32_nxv4f16: @@ -56,6 +61,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vfncvt.f.f.w v10, v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -68,6 +74,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32-NEXT: vfncvt.f.f.w v12, v8 ; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv8f32_nxv8f16: @@ -75,6 +82,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV64-NEXT: vfncvt.f.f.w v12, v8 ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -87,6 +95,7 @@ ; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; RV32-NEXT: vfncvt.f.f.w v16, v8 ; RV32-NEXT: vmv4r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv16f32_nxv16f16: @@ -94,6 +103,7 @@ ; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; RV64-NEXT: vfncvt.f.f.w v16, v8 ; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -107,6 +117,7 @@ ; RV32-NEXT: vfncvt.rod.f.f.w v9, v8 ; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV32-NEXT: vfncvt.f.f.w v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv1f64_nxv1f16: @@ -115,6 +126,7 @@ ; RV64-NEXT: vfncvt.rod.f.f.w v9, v8 ; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; RV64-NEXT: vfncvt.f.f.w v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -127,6 +139,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV32-NEXT: vfncvt.f.f.w v9, v8 ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv1f64_nxv1f32: @@ -134,6 +147,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; RV64-NEXT: vfncvt.f.f.w v9, v8 ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -147,6 +161,7 @@ ; RV32-NEXT: vfncvt.rod.f.f.w v10, v8 ; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; RV32-NEXT: vfncvt.f.f.w v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv2f64_nxv2f16: @@ -155,6 +170,7 @@ ; RV64-NEXT: vfncvt.rod.f.f.w v10, v8 ; RV64-NEXT: vsetvli 
zero, zero, e16, mf2, ta, mu ; RV64-NEXT: vfncvt.f.f.w v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -167,6 +183,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vfncvt.f.f.w v10, v8 ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv2f64_nxv2f32: @@ -174,6 +191,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vfncvt.f.f.w v10, v8 ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -187,6 +205,7 @@ ; RV32-NEXT: vfncvt.rod.f.f.w v12, v8 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vfncvt.f.f.w v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv4f64_nxv4f16: @@ -195,6 +214,7 @@ ; RV64-NEXT: vfncvt.rod.f.f.w v12, v8 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vfncvt.f.f.w v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -207,6 +227,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vfncvt.f.f.w v12, v8 ; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv4f64_nxv4f32: @@ -214,6 +235,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vfncvt.f.f.w v12, v8 ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -227,6 +249,7 @@ ; RV32-NEXT: vfncvt.rod.f.f.w v16, v8 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vfncvt.f.f.w v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv8f64_nxv8f16: @@ -235,6 +258,7 @@ ; RV64-NEXT: vfncvt.rod.f.f.w v16, v8 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vfncvt.f.f.w v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -247,6 +271,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32-NEXT: vfncvt.f.f.w v16, v8 ; RV32-NEXT: vmv4r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv8f64_nxv8f32: @@ -254,6 +279,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64-NEXT: vfncvt.f.f.w v16, v8 ; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -27,6 +28,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -45,6 +47,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -59,6 +62,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, 
mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -77,6 +81,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -91,6 +96,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -109,6 +115,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -123,6 +130,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -141,6 +149,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -155,6 +164,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -173,6 +183,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -187,6 +198,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -205,6 +217,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -219,6 +232,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -237,6 +251,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -251,6 +266,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: 
vfdiv.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -269,6 +285,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -283,6 +300,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -301,6 +319,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -315,6 +334,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -333,6 +353,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -347,6 +368,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -365,6 +387,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -379,6 +402,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -397,6 +421,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -411,6 +436,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -429,6 +455,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v12, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -443,6 +470,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: 
vfdiv.vv v8, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -461,6 +489,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -475,6 +504,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfdiv.vv v8, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -27,6 +28,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -45,6 +47,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -59,6 +62,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -77,6 +81,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -91,6 +96,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -109,6 +115,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -123,6 +130,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -141,6 +149,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = 
shufflevector %elt.head, undef, zeroinitializer @@ -155,6 +164,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -173,6 +183,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -187,6 +198,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -205,6 +217,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -219,6 +232,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -237,6 +251,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -251,6 +266,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -269,6 +285,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -283,6 +300,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -301,6 +319,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -315,6 +334,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -333,6 +353,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = 
shufflevector %elt.head, undef, zeroinitializer @@ -347,6 +368,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -365,6 +387,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -379,6 +402,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -397,6 +421,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -411,6 +436,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v10, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -429,6 +455,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -443,6 +470,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v12, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -461,6 +489,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -475,6 +504,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v16, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv1f16( %v) ret %r @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv2f16( %v) ret %r @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv4f16( %v) ret %r @@ 
-47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv8f16( %v) ret %r @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv16f16( %v) ret %r @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv32f16( %v) ret %r @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv1f32( %v) ret %r @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv2f32( %v) ret %r @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv4f32( %v) ret %r @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv8f32( %v) ret %r @@ -131,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv16f32( %v) ret %r @@ -143,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv1f64( %v) ret %r @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv2f64( %v) ret %r @@ -167,6 +180,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv4f64( %v) ret %r @@ -179,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsqrt.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv8f64( %v) ret %r diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -31,6 +33,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -41,6 +44,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -53,6 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -63,6 +68,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -75,6 +81,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -85,6 +92,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -97,6 +105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -109,6 +118,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -119,6 +129,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -131,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -141,6 +153,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -153,6 +166,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -163,6 +177,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -175,6 +190,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -185,6 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -207,6 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu 
; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -219,6 +238,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -229,6 +249,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -241,6 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -253,6 +275,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -263,6 +286,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -275,6 +299,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -285,6 +310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -297,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -307,6 +334,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -319,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -329,6 +358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -341,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = fsub %va, %vb ret %vc @@ -351,6 +382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -363,6 +395,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 
0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv1f16( %va, %b, %m, i32 %evl) ret %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -35,6 +37,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -49,6 +52,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -65,6 +69,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv2f16( %va, %b, %m, i32 %evl) ret %v @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -89,6 +95,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -103,6 +110,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -119,6 +127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv4f16( %va, %b, %m, i32 %evl) ret %v @@ -129,6 +138,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -143,6 +153,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -157,6 +168,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -173,6 +185,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv8f16( %va, %b, %m, i32 %evl) ret %v @@ -183,6 +196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -197,6 +211,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -211,6 +226,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -227,6 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv16f16( %va, %b, %m, i32 %evl) ret %v @@ -237,6 +254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -251,6 +269,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -265,6 +284,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -281,6 +301,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv32f16( %va, %b, %m, i32 %evl) ret %v @@ -291,6 +312,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -305,6 +327,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -319,6 +342,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, half %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -335,6 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv1f32( %va, %b, %m, i32 %evl) ret %v @@ -345,6 +370,7 
@@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -359,6 +385,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -373,6 +400,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -389,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv2f32( %va, %b, %m, i32 %evl) ret %v @@ -399,6 +428,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -413,6 +443,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -427,6 +458,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -443,6 +475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv4f32( %va, %b, %m, i32 %evl) ret %v @@ -453,6 +486,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -467,6 +501,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -481,6 +516,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -497,6 +533,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv8f32( %va, %b, %m, i32 %evl) ret %v @@ -507,6 +544,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, 
zeroinitializer @@ -521,6 +559,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -535,6 +574,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -551,6 +591,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv16f32( %va, %b, %m, i32 %evl) ret %v @@ -561,6 +602,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -575,6 +617,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -589,6 +632,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, float %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -605,6 +649,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv1f64( %va, %b, %m, i32 %evl) ret %v @@ -615,6 +660,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -629,6 +675,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -643,6 +690,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -659,6 +707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv2f64( %va, %b, %m, i32 %evl) ret %v @@ -669,6 +718,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -683,6 +733,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement 
undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -697,6 +748,7 @@ ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -713,6 +765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv4f64( %va, %b, %m, i32 %evl) ret %v @@ -723,6 +776,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -737,6 +791,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -751,6 +806,7 @@ ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -767,6 +823,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv7f64( %va, %b, %m, i32 %evl) ret %v @@ -779,6 +836,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fsub.nxv8f64( %va, %b, %m, i32 %evl) ret %v @@ -789,6 +847,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -803,6 +862,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -817,6 +877,7 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, double %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -23,6 +24,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -35,6 +37,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: 
vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -47,6 +50,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -59,6 +63,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -71,6 +76,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -83,6 +89,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -95,6 +102,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -107,6 +115,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -119,6 +128,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -131,6 +141,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -143,6 +154,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -155,6 +167,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -167,6 +180,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -179,6 +193,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -191,6 +206,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -203,6 +219,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -215,6 +232,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -227,6 +245,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -239,6 +258,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -251,6 +271,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -263,6 +284,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -275,6 +297,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -287,6 +310,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -299,6 +323,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -311,6 +336,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -323,6 +349,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -335,6 +362,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -347,6 +375,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -359,6 +388,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -370,6 +400,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -381,6 +412,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -392,6 +424,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -403,6 +436,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -414,6 +448,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf8 v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -425,6 +460,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf8 v9, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -436,6 +472,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; 
CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -447,6 +484,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -458,6 +496,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -469,6 +508,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -480,6 +520,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf8 v10, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -491,6 +532,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf8 v10, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -502,6 +544,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -513,6 +556,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -524,6 +568,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -535,6 +580,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -546,6 +592,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf8 v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -557,6 +604,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf8 v12, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -568,6 +616,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -579,6 +628,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -590,6 +640,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -601,6 +652,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vzext.vf4 v12, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -612,6 +664,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf8 v16, v8 
; CHECK-NEXT: vfcvt.f.x.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -623,6 +676,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf8 v16, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -634,6 +688,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -645,6 +700,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -656,6 +712,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v16, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -667,6 +724,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v16, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -678,6 +736,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -689,6 +748,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -699,6 +759,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -709,6 +770,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -720,6 +782,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -731,6 +794,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -742,6 +806,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -753,6 +818,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -763,6 +829,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -773,6 +840,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -784,6 +852,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %evec = sitofp %va to ret %evec @@ -795,6 +864,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -806,6 +876,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -817,6 +888,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -827,6 +899,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -837,6 +910,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -848,6 +922,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -859,6 +934,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -870,6 +946,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -881,6 +958,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf4 v12, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -891,6 +969,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -901,6 +980,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -912,6 +992,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -923,6 +1004,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -934,6 +1016,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v16, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -945,6 +1028,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v16, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -955,6 +1039,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -965,6 +1050,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -976,6 +1062,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -987,6 +1074,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -997,6 +1085,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1007,6 +1096,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1018,6 +1108,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1029,6 +1120,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1039,6 +1131,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1049,6 +1142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1060,6 +1154,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1071,6 +1166,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1082,6 +1178,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1093,6 +1190,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1103,6 +1201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1113,6 +1212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1124,6 +1224,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1135,6 +1236,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1146,6 +1248,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v10, v8 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1157,6 +1260,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1167,6 +1271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1177,6 +1282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1188,6 +1294,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1199,6 +1306,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1210,6 +1318,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v12, v8 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1221,6 +1330,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1231,6 +1341,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1241,6 +1352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1252,6 +1364,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.x.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1263,6 +1376,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1274,6 +1388,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v16, v8 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1285,6 +1400,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1295,6 +1411,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1305,6 +1422,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret 
%evec @@ -1317,6 +1435,7 @@ ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1329,6 +1448,7 @@ ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1340,6 +1460,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1351,6 +1472,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1361,6 +1483,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1371,6 +1494,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1383,6 +1507,7 @@ ; CHECK-NEXT: vfncvt.f.x.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1395,6 +1520,7 @@ ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1406,6 +1532,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v10, v8 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1417,6 +1544,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1427,6 +1555,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1437,6 +1566,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1449,6 +1579,7 @@ ; CHECK-NEXT: vfncvt.f.x.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1461,6 +1592,7 @@ ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1472,6 +1604,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v12, v8 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1483,6 +1616,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ 
-1493,6 +1627,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1503,6 +1638,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1515,6 +1651,7 @@ ; CHECK-NEXT: vfncvt.f.x.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1527,6 +1664,7 @@ ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1538,6 +1676,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v16, v8 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1549,6 +1688,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1559,6 +1699,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1569,6 +1710,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i32 %vl) @@ -25,6 +26,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -41,6 +43,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16* %base, %index, i32 %vl) @@ -55,6 +58,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -71,6 +75,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16* %base, %index, i32 %vl) @@ -85,6 +90,7 @@ ; CHECK-NEXT: vsetvli 
zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -101,6 +107,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -115,6 +122,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -131,6 +139,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -145,6 +154,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -161,6 +171,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -175,6 +186,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -191,6 +203,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -207,6 +220,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -223,6 +237,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -239,6 +254,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -255,6 +271,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -271,6 +288,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -287,6 +305,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -304,6 +323,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -320,6 +340,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -337,6 +358,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -353,6 +375,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -370,6 +393,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -386,6 +410,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -404,6 +429,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -420,6 +446,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -438,6 +465,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, 
%val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -454,6 +482,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -472,6 +501,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -488,6 +518,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -507,6 +538,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -523,6 +555,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -542,6 +575,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -558,6 +592,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -577,6 +612,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -593,6 +629,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -613,6 +650,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -629,6 +667,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -649,6 +688,7 
@@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -665,6 +705,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -685,6 +726,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -701,6 +743,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -722,6 +765,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -738,6 +782,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -759,6 +804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -775,6 +821,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -796,6 +843,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -812,6 +860,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -826,6 +875,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* 
%base, %index, %mask, i32 %vl, i32 1) @@ -842,6 +892,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -856,6 +907,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -872,6 +924,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -886,6 +939,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -902,6 +956,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -917,6 +972,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -933,6 +989,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -949,6 +1006,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -965,6 +1023,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -980,6 +1039,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -996,6 +1056,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -1013,6 
+1074,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -1029,6 +1091,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -1046,6 +1109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -1062,6 +1126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -1078,6 +1143,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -1094,6 +1160,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1108,6 +1175,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1124,6 +1192,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1138,6 +1207,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1154,6 +1224,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1168,6 +1239,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ 
-1184,6 +1256,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1200,6 +1273,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1216,6 +1290,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1232,6 +1307,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1248,6 +1324,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1264,6 +1341,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1280,6 +1358,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1297,6 +1376,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1313,6 +1393,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1330,6 +1411,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1346,6 +1428,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1363,6 +1446,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1379,6 +1463,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1397,6 +1482,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1413,6 +1499,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1431,6 +1518,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1447,6 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1465,6 +1554,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1481,6 +1571,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1500,6 +1591,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1516,6 +1608,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1535,6 +1628,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1551,6 +1645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1570,6 +1665,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1586,6 +1682,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1606,6 +1703,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1622,6 +1720,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1642,6 +1741,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1658,6 +1758,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1678,6 +1779,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1694,6 +1796,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1715,6 +1818,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1731,6 +1835,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1752,6 +1857,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1768,6 +1874,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1789,6 +1896,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1805,6 +1913,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -1819,6 +1928,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1835,6 +1945,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -1849,6 +1960,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1865,6 +1977,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -1879,6 +1992,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1895,6 +2009,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -1911,6 +2026,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1927,6 +2043,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v 
v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -1943,6 +2060,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1959,6 +2077,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -1974,6 +2093,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1990,6 +2110,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2007,6 +2128,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2023,6 +2145,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2040,6 +2163,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2056,6 +2180,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2073,6 +2198,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2089,6 +2215,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2107,6 +2234,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2123,6 +2251,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2141,6 +2270,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2157,6 +2287,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2175,6 +2306,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2191,6 +2323,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2210,6 +2343,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2226,6 +2360,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2245,6 +2380,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2261,6 +2397,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2280,6 +2417,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2296,6 +2434,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2316,6 +2455,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2332,6 +2472,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2352,6 +2493,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2368,6 +2510,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2388,6 +2531,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2404,6 +2548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2425,6 +2570,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2441,6 +2587,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2462,6 +2609,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2478,6 +2626,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2499,6 +2648,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2515,6 +2665,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2529,6 +2680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2545,6 +2697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2559,6 +2712,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2575,6 +2729,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2589,6 +2744,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2605,6 +2761,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2621,6 +2778,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2637,6 +2795,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2653,6 +2812,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2669,6 +2829,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call 
{,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2685,6 +2846,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2701,6 +2863,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2718,6 +2881,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2734,6 +2898,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2751,6 +2916,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2767,6 +2933,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2784,6 +2951,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2800,6 +2968,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2818,6 +2987,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2834,6 +3004,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2852,6 +3023,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, 
%mask, i32 %vl, i32 1) @@ -2868,6 +3040,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2886,6 +3059,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2902,6 +3076,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2921,6 +3096,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2937,6 +3113,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2956,6 +3133,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2972,6 +3150,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2991,6 +3170,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3007,6 +3187,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -3027,6 +3208,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3043,6 +3225,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ 
-3063,6 +3246,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3079,6 +3263,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -3099,6 +3284,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3115,6 +3301,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -3136,6 +3323,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3152,6 +3340,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -3173,6 +3362,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3189,6 +3379,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -3210,6 +3401,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3226,6 +3418,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3240,6 +3433,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail 
call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3256,6 +3450,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3270,6 +3465,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3286,6 +3482,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3300,6 +3497,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3316,6 +3514,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3332,6 +3531,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3348,6 +3548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3364,6 +3565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3380,6 +3582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3395,6 +3598,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3411,6 +3615,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3428,6 +3633,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3444,6 +3650,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3461,6 +3668,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3477,6 +3685,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3494,6 +3703,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3510,6 +3720,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3524,6 +3735,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3540,6 +3752,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3554,6 +3767,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3570,6 +3784,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3584,6 +3799,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, 
i32 %vl, i32 1) @@ -3600,6 +3816,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3615,6 +3832,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3631,6 +3849,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3647,6 +3866,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3663,6 +3883,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3678,6 +3899,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3694,6 +3916,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3711,6 +3934,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3727,6 +3951,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3744,6 +3969,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3760,6 +3986,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3776,6 +4003,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v7, 
(a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3792,6 +4020,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3810,6 +4039,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3826,6 +4056,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3844,6 +4075,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3860,6 +4092,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3877,6 +4110,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3893,6 +4127,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3912,6 +4147,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3928,6 +4164,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3947,6 +4184,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3963,6 +4201,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; 
CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3982,6 +4221,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3998,6 +4238,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4018,6 +4259,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4034,6 +4276,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4054,6 +4297,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4070,6 +4314,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4090,6 +4335,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4106,6 +4352,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4127,6 +4374,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4143,6 +4391,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4164,6 +4413,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: 
vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4180,6 +4430,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4201,6 +4452,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4217,6 +4469,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32* %base, %index, i32 %vl) @@ -4231,6 +4484,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -4247,6 +4501,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32* %base, %index, i32 %vl) @@ -4261,6 +4516,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -4277,6 +4533,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32* %base, %index, i32 %vl) @@ -4291,6 +4548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -4307,6 +4565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4321,6 +4580,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4337,6 +4597,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4351,6 +4612,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4367,6 +4629,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4381,6 +4644,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4397,6 +4661,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4413,6 +4678,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4429,6 +4695,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4445,6 +4712,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4461,6 +4729,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4476,6 +4745,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4492,6 +4762,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4509,6 +4780,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v 
v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4525,6 +4797,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4542,6 +4815,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4558,6 +4832,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4575,6 +4850,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4591,6 +4867,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4609,6 +4886,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4625,6 +4903,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4643,6 +4922,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4659,6 +4939,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4677,6 +4958,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4693,6 +4975,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4712,6 +4995,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4728,6 +5012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4747,6 +5032,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4763,6 +5049,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4782,6 +5069,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4798,6 +5086,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4818,6 +5107,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4834,6 +5124,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4854,6 +5145,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4870,6 +5162,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4890,6 +5183,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4906,6 +5200,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4927,6 +5222,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4943,6 +5239,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4964,6 +5261,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4980,6 +5278,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -5001,6 +5300,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5017,6 +5317,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5031,6 +5332,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5047,6 +5349,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5061,6 +5364,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5077,6 +5381,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5091,6 +5396,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5107,6 +5413,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5123,6 +5430,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5139,6 +5447,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5155,6 +5464,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5171,6 +5481,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5187,6 +5498,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5203,6 +5515,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5220,6 +5533,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5236,6 +5550,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5253,6 +5568,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, 
%val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5269,6 +5585,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5286,6 +5603,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5302,6 +5620,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5320,6 +5639,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5336,6 +5656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5354,6 +5675,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5370,6 +5692,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5388,6 +5711,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5404,6 +5728,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5423,6 +5748,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5439,6 +5765,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ 
-5458,6 +5785,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5474,6 +5802,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5493,6 +5822,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5509,6 +5839,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5529,6 +5860,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5545,6 +5877,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5565,6 +5898,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5581,6 +5915,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5601,6 +5936,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5617,6 +5953,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5638,6 +5975,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, 
%val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5654,6 +5992,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5675,6 +6014,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5691,6 +6031,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5712,6 +6053,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5728,6 +6070,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8* %base, %index, i32 %vl) @@ -5742,6 +6085,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5758,6 +6102,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8* %base, %index, i32 %vl) @@ -5772,6 +6117,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5788,6 +6134,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -5802,6 +6149,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5818,6 +6166,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -5832,6 +6181,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5848,6 +6198,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -5862,6 +6213,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5878,6 +6230,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -5894,6 +6247,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5910,6 +6264,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -5926,6 +6281,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5942,6 +6298,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -5958,6 +6315,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5974,6 +6332,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -5991,6 +6350,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6007,6 +6367,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -6024,6 +6385,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6040,6 +6402,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -6057,6 +6420,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6073,6 +6437,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -6091,6 +6456,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6107,6 +6473,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -6125,6 +6492,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6141,6 +6509,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -6159,6 +6528,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6175,6 +6545,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -6194,6 +6565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v 
v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6210,6 +6582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -6229,6 +6602,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6245,6 +6619,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -6264,6 +6639,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6280,6 +6656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -6300,6 +6677,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6316,6 +6694,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -6336,6 +6715,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6352,6 +6732,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -6372,6 +6753,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6388,6 +6770,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -6409,6 +6792,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6425,6 +6809,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -6446,6 +6831,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6462,6 +6848,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -6483,6 +6870,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6499,6 +6887,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -6513,6 +6902,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6529,6 +6919,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -6543,6 +6934,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6559,6 +6951,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -6573,6 +6966,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6589,6 +6983,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -6605,6 +7000,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6621,6 +7017,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -6637,6 +7034,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6653,6 +7051,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -6669,6 +7068,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6685,6 +7085,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -6702,6 +7103,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6718,6 +7120,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -6735,6 +7138,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6751,6 +7155,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -6768,6 +7173,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6784,6 +7190,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -6802,6 +7209,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6818,6 +7226,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -6836,6 +7245,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6852,6 +7262,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -6870,6 +7281,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6886,6 +7298,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -6905,6 +7318,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6921,6 +7335,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -6940,6 +7355,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6956,6 +7372,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -6975,6 +7392,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6991,6 +7409,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -7011,6 +7430,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7027,6 +7447,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -7047,6 +7468,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7063,6 +7485,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -7083,6 +7506,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7099,6 +7523,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -7120,6 +7545,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7136,6 +7562,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -7157,6 +7584,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7173,6 +7601,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -7194,6 +7623,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7210,6 +7640,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7224,6 +7655,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7240,6 +7672,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7254,6 +7687,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7270,6 +7704,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7284,6 +7719,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7300,6 +7736,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7316,6 +7753,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7332,6 +7770,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7348,6 +7787,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7364,6 +7804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7380,6 +7821,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7396,6 +7838,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7413,6 +7856,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7429,6 +7873,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7446,6 +7891,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7462,6 +7908,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7479,6 +7926,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7495,6 +7943,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half* %base, %index, i32 %vl) @@ -7509,6 +7958,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -7525,6 +7975,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half* %base, %index, i32 %vl) @@ -7539,6 +7990,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -7555,6 +8007,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half* %base, %index, i32 %vl) @@ -7569,6 +8022,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -7585,6 +8039,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double* %base, %index, i32 %vl) @@ -7599,6 +8054,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7615,6 +8071,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double* %base, %index, i32 %vl) @@ -7629,6 +8086,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7645,6 +8103,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double* %base, %index, i32 %vl) @@ -7659,6 +8118,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 
; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7675,6 +8135,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7689,6 +8150,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7705,6 +8167,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7719,6 +8182,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7735,6 +8199,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7749,6 +8214,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7765,6 +8231,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7781,6 +8248,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7797,6 +8265,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7813,6 +8282,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7829,6 +8299,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7845,6 +8316,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7861,6 +8333,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7878,6 +8351,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7894,6 +8368,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7911,6 +8386,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7927,6 +8403,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7944,6 +8421,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7960,6 +8438,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7978,6 +8457,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7994,6 +8474,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8012,6 +8493,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* 
%base, %index, %mask, i32 %vl, i32 1) @@ -8028,6 +8510,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8046,6 +8529,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8062,6 +8546,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8081,6 +8566,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8097,6 +8583,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8116,6 +8603,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8132,6 +8620,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8151,6 +8640,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8167,6 +8657,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8187,6 +8678,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8203,6 +8695,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8223,6 +8716,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8239,6 +8733,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8259,6 +8754,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8275,6 +8771,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8296,6 +8793,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8312,6 +8810,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8333,6 +8832,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8349,6 +8849,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8370,6 +8871,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8386,6 +8888,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8400,6 +8903,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8416,6 +8920,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8430,6 +8935,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8446,6 +8952,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8460,6 +8967,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8476,6 +8984,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8492,6 +9001,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8508,6 +9018,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8524,6 +9035,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8540,6 +9052,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8556,6 +9069,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8572,6 +9086,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8589,6 +9104,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8605,6 +9121,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8622,6 +9139,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8638,6 +9156,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8655,6 +9174,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8671,6 +9191,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8689,6 +9210,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8705,6 +9227,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8723,6 +9246,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8739,6 +9263,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8757,6 +9282,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, 
%val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8773,6 +9299,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8792,6 +9319,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8808,6 +9336,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8827,6 +9356,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8843,6 +9373,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8862,6 +9393,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8878,6 +9410,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8898,6 +9431,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8914,6 +9448,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8934,6 +9469,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8950,6 +9486,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8970,6 +9507,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8986,6 +9524,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -9007,6 +9546,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9023,6 +9563,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -9044,6 +9585,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9060,6 +9602,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -9081,6 +9624,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9097,6 +9641,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9111,6 +9656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9127,6 +9673,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9141,6 +9688,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9157,6 +9705,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9171,6 +9720,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9187,6 +9737,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9203,6 +9754,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9219,6 +9771,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9235,6 +9788,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9251,6 +9805,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9267,6 +9822,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9283,6 +9839,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9300,6 +9857,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9316,6 +9874,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9333,6 +9892,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9349,6 +9909,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9366,6 +9927,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9382,6 +9944,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9400,6 +9963,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9416,6 +9980,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9434,6 +9999,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9450,6 +10016,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9468,6 +10035,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9484,6 +10052,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9503,6 +10072,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, 
%val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9519,6 +10089,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9538,6 +10109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9554,6 +10126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9573,6 +10146,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9589,6 +10163,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9609,6 +10184,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9625,6 +10201,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9645,6 +10222,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9661,6 +10239,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9681,6 +10260,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9697,6 +10277,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9718,6 +10299,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9734,6 +10316,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9755,6 +10338,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9771,6 +10355,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9792,6 +10377,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9808,6 +10394,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -9822,6 +10409,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9838,6 +10426,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -9852,6 +10441,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9868,6 +10458,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -9882,6 +10473,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9898,6 +10490,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -9914,6 +10507,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9930,6 +10524,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -9946,6 +10541,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9962,6 +10558,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -9978,6 +10575,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9994,6 +10592,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10011,6 +10610,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10027,6 +10627,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10044,6 +10645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10060,6 +10662,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: 
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10077,6 +10680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10093,6 +10697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10111,6 +10716,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10127,6 +10733,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10145,6 +10752,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10161,6 +10769,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10179,6 +10788,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10195,6 +10805,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10214,6 +10825,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10230,6 +10842,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10249,6 +10862,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10265,6 +10879,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10284,6 +10899,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10300,6 +10916,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10320,6 +10937,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10336,6 +10954,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10356,6 +10975,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10372,6 +10992,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10392,6 +11013,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10408,6 +11030,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10429,6 +11052,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10445,6 +11069,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, 
(a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10466,6 +11091,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10482,6 +11108,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10503,6 +11130,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10519,6 +11147,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10533,6 +11162,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10549,6 +11179,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10563,6 +11194,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10579,6 +11211,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10593,6 +11226,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10609,6 +11243,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10625,6 +11260,7 @@ ; CHECK-NEXT: vsetvli zero, a1, 
e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10641,6 +11277,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10657,6 +11294,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10673,6 +11311,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10688,6 +11327,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10704,6 +11344,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10721,6 +11362,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10737,6 +11379,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10754,6 +11397,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10770,6 +11414,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10787,6 +11432,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10803,6 +11449,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, 
ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float* %base, %index, i32 %vl) @@ -10817,6 +11464,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10833,6 +11481,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float* %base, %index, i32 %vl) @@ -10847,6 +11496,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10863,6 +11513,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float* %base, %index, i32 %vl) @@ -10877,6 +11528,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10893,6 +11545,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -10907,6 +11560,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -10923,6 +11577,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -10937,6 +11592,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -10953,6 +11609,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -10967,6 +11624,7 @@ ; CHECK-NEXT: vsetvli 
zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -10983,6 +11641,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -10999,6 +11658,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11015,6 +11675,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -11031,6 +11692,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11047,6 +11709,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -11063,6 +11726,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11079,6 +11743,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -11096,6 +11761,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11112,6 +11778,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -11129,6 +11796,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11145,6 +11813,7 @@ ; CHECK-NEXT: vsetvli zero, a1, 
e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -11162,6 +11831,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11178,6 +11848,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11192,6 +11863,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11208,6 +11880,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11222,6 +11895,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11238,6 +11912,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11252,6 +11927,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11268,6 +11944,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11284,6 +11961,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11300,6 +11978,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11316,6 +11995,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, 
v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11332,6 +12012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11347,6 +12028,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11363,6 +12045,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11380,6 +12063,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11396,6 +12080,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11413,6 +12098,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11429,6 +12115,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11446,6 +12133,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11462,6 +12150,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11480,6 +12169,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11496,6 +12186,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: 
vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11514,6 +12205,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11530,6 +12222,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11548,6 +12241,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11564,6 +12258,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11583,6 +12278,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11599,6 +12295,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11618,6 +12315,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11634,6 +12332,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11653,6 +12352,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11669,6 +12369,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11689,6 +12390,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, 
v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11705,6 +12407,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11725,6 +12428,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11741,6 +12445,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11761,6 +12466,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11777,6 +12483,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11798,6 +12505,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11814,6 +12522,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11835,6 +12544,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11851,6 +12561,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11872,6 +12583,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11888,6 
+12600,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -11902,6 +12615,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11918,6 +12632,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -11932,6 +12647,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11948,6 +12664,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -11962,6 +12679,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11978,6 +12696,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -11994,6 +12713,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12010,6 +12730,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12026,6 +12747,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12042,6 +12764,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12058,6 +12781,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; 
CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12074,6 +12798,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12091,6 +12816,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12107,6 +12833,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12124,6 +12851,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12140,6 +12868,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12157,6 +12886,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12173,6 +12903,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12191,6 +12922,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12207,6 +12939,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12225,6 +12958,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12241,6 +12975,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: 
vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12259,6 +12994,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12275,6 +13011,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12294,6 +13031,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12310,6 +13048,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12329,6 +13068,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12345,6 +13085,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12364,6 +13105,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12380,6 +13122,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12400,6 +13143,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12416,6 +13160,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12436,6 +13181,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; 
CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12452,6 +13198,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12472,6 +13219,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12488,6 +13236,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12509,6 +13258,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12525,6 +13275,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12546,6 +13297,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12562,6 +13314,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12583,6 +13336,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12599,6 +13353,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12613,6 +13368,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12629,6 +13385,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12643,6 +13400,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12659,6 +13417,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) @@ -12673,6 +13432,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12689,6 +13449,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12705,6 +13466,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12721,6 +13483,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12737,6 +13500,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12753,6 +13517,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) @@ -12769,6 +13534,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12785,6 +13551,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12802,6 +13569,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12818,6 +13586,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12835,6 +13604,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12851,6 +13621,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) @@ -12868,6 +13639,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 %vl) @@ -25,6 +26,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -41,6 +43,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16* %base, %index, i64 %vl) @@ -55,6 +58,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -71,6 +75,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16* %base, %index, i64 %vl) @@ -85,6 +90,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: 
vloxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -101,6 +107,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -115,6 +122,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -131,6 +139,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -145,6 +154,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -161,6 +171,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -175,6 +186,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -191,6 +203,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -205,6 +218,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -221,6 +235,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -237,6 +252,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -253,6 +269,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; 
CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -269,6 +286,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -285,6 +303,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -300,6 +319,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -316,6 +336,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -332,6 +353,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -348,6 +370,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -365,6 +388,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -381,6 +405,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -398,6 +423,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -414,6 +440,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -431,6 +458,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -447,6 +475,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -464,6 +493,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -480,6 +510,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -494,6 +525,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -510,6 +542,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -524,6 +557,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -540,6 +574,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -554,6 +589,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -570,6 +606,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -585,6 +622,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -601,6 +639,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -617,6 +656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -633,6 +673,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -648,6 +689,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -664,6 +706,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -681,6 +724,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -697,6 +741,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -714,6 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -730,6 +776,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -746,6 +793,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -762,6 +810,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -776,6 +825,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -792,6 +842,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -806,6 +857,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -822,6 +874,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -836,6 +889,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -852,6 +906,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -866,6 +921,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -882,6 +938,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -898,6 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -914,6 +972,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -930,6 +989,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -946,6 +1006,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -962,6 +1023,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -978,6 +1040,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -994,6 +1057,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1010,6 +1074,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1027,6 +1092,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1043,6 +1109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1060,6 +1127,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1076,6 +1144,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1093,6 +1162,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1109,6 +1179,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1126,6 +1197,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1142,6 +1214,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1160,6 +1233,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1176,6 +1250,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1194,6 +1269,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1210,6 +1286,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1228,6 +1305,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1244,6 +1322,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1262,6 +1341,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1278,6 +1358,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1297,6 +1378,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1313,6 +1395,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1332,6 +1415,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1348,6 +1432,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1367,6 +1452,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1383,6 +1469,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1402,6 +1489,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1418,6 +1506,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1438,6 +1527,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1454,6 +1544,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1474,6 +1565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1490,6 +1582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1510,6 +1603,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1526,6 +1620,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v 
v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1546,6 +1641,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1562,6 +1658,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1583,6 +1680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1599,6 +1697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1620,6 +1719,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1636,6 +1736,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1657,6 +1758,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1673,6 +1775,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1694,6 +1797,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1710,6 +1814,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -1724,6 +1829,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: 
vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1740,6 +1846,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -1754,6 +1861,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1770,6 +1878,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -1784,6 +1893,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1800,6 +1910,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -1814,6 +1925,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1830,6 +1942,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -1846,6 +1959,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1862,6 +1976,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -1878,6 +1993,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1894,6 +2010,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -1910,6 +2027,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1926,6 +2044,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -1942,6 +2061,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1958,6 +2078,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -1975,6 +2096,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1991,6 +2113,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -2008,6 +2131,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2024,6 +2148,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -2041,6 +2166,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2057,6 +2183,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -2074,6 +2201,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2090,6 +2218,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -2108,6 +2237,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2124,6 +2254,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -2142,6 +2273,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2158,6 +2290,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -2176,6 +2309,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2192,6 +2326,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -2210,6 +2345,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2226,6 +2362,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -2245,6 +2382,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2261,6 +2399,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -2280,6 +2419,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2296,6 +2436,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -2315,6 +2456,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2331,6 +2473,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -2350,6 +2493,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2366,6 +2510,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -2386,6 +2531,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2402,6 +2548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -2422,6 +2569,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2438,6 +2586,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -2458,6 +2607,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2474,6 +2624,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -2494,6 +2645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2510,6 +2662,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -2531,6 +2684,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2547,6 +2701,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -2568,6 +2723,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2584,6 +2740,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -2605,6 +2762,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2621,6 +2779,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -2642,6 +2801,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2658,6 +2818,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; 
CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2672,6 +2833,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2688,6 +2850,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2702,6 +2865,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2718,6 +2882,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -2732,6 +2897,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2748,6 +2914,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -2762,6 +2929,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2778,6 +2946,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2794,6 +2963,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2810,6 +2980,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2826,6 +2997,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2842,6 +3014,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -2857,6 +3030,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2873,6 +3047,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -2888,6 +3063,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2904,6 +3080,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2921,6 +3098,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2937,6 +3115,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2954,6 +3133,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2970,6 +3150,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -2986,6 +3167,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -3002,6 +3184,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; 
CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -3019,6 +3202,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -3035,6 +3219,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3049,6 +3234,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3065,6 +3251,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3079,6 +3266,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3095,6 +3283,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3109,6 +3298,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3125,6 +3315,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3139,6 +3330,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3155,6 +3347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3170,6 +3363,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3186,6 +3380,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3202,6 +3397,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3218,6 +3414,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3233,6 +3430,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3249,6 +3447,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3265,6 +3464,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3281,6 +3481,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3298,6 +3499,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3314,6 +3516,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3331,6 +3534,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3347,6 +3551,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3363,6 +3568,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3379,6 +3585,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3396,6 +3603,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3412,6 +3620,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3430,6 +3639,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3446,6 +3656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3464,6 +3675,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3480,6 +3692,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3497,6 +3710,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3513,6 +3727,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3531,6 +3746,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3547,6 +3763,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3566,6 +3783,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3582,6 +3800,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3601,6 +3820,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3617,6 +3837,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3636,6 +3857,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3652,6 +3874,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3671,6 +3894,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3687,6 +3911,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3707,6 +3932,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3723,6 +3949,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call 
{,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3743,6 +3970,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3759,6 +3987,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3779,6 +4008,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3795,6 +4025,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3815,6 +4046,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3831,6 +4063,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3852,6 +4085,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3868,6 +4102,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3889,6 +4124,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3905,6 +4141,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3926,6 +4163,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 
= tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3942,6 +4180,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3963,6 +4202,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3979,6 +4219,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -3993,6 +4234,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4009,6 +4251,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4023,6 +4266,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4039,6 +4283,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4053,6 +4298,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4069,6 +4315,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4083,6 +4330,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4099,6 +4347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4115,6 +4364,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4131,6 +4381,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4147,6 +4398,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4163,6 +4415,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4179,6 +4432,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4195,6 +4449,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4211,6 +4466,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4227,6 +4483,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4244,6 +4501,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4260,6 +4518,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4277,6 +4536,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, 
%index, %mask, i64 %vl, i64 1) @@ -4293,6 +4553,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4310,6 +4571,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4326,6 +4588,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4343,6 +4606,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4359,6 +4623,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4377,6 +4642,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4393,6 +4659,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4411,6 +4678,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4427,6 +4695,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4445,6 +4714,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4461,6 +4731,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4479,6 +4750,7 @@ ; CHECK-NEXT: vsetvli zero, a1, 
e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4495,6 +4767,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4514,6 +4787,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4530,6 +4804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4549,6 +4824,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4565,6 +4841,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4584,6 +4861,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4600,6 +4878,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4619,6 +4898,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4635,6 +4915,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4655,6 +4936,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4671,6 +4953,7 
@@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4691,6 +4974,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4707,6 +4991,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4727,6 +5012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4743,6 +5029,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4763,6 +5050,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4779,6 +5067,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4800,6 +5089,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4816,6 +5106,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4837,6 +5128,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4853,6 +5145,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16* %base, 
%index, i64 %vl) @@ -4874,6 +5167,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4890,6 +5184,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4911,6 +5206,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4927,6 +5223,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -4941,6 +5238,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -4957,6 +5255,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -4971,6 +5270,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -4987,6 +5287,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5001,6 +5302,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5017,6 +5319,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5031,6 +5334,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64( 
%val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5047,6 +5351,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5063,6 +5368,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5079,6 +5385,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5095,6 +5402,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5111,6 +5419,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5127,6 +5436,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5143,6 +5453,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5158,6 +5469,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5174,6 +5486,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5191,6 +5504,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5207,6 +5521,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5224,6 +5539,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, 
mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5240,6 +5556,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5257,6 +5574,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5273,6 +5591,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5290,6 +5609,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5306,6 +5626,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5324,6 +5645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5340,6 +5662,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5358,6 +5681,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5374,6 +5698,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5392,6 +5717,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5408,6 +5734,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 
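; The one-line change repeated through these vloxseg check blocks is the same in
; every case: the lowered code never adjusts sp, so the CFA stays at sp+0 and the
; epilogue now states that explicitly before returning. A minimal sketch of the
; expected tail of such a function, with illustrative register names rather than
; ones taken from any specific test:
;   vsetvli zero, a1, e16, m2, ta, mu
;   vloxseg2ei16.v v0, (a0), v8
;   vmv2r.v v8, v2
;   .cfi_def_cfa_offset 0
;   ret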
; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5426,6 +5753,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5442,6 +5770,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5461,6 +5790,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5477,6 +5807,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5496,6 +5827,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5512,6 +5844,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5531,6 +5864,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5547,6 +5881,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5566,6 +5901,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5582,6 +5918,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5602,6 +5939,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5618,6 +5956,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5638,6 +5977,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5654,6 +5994,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5674,6 +6015,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5690,6 +6032,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5710,6 +6053,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5726,6 +6070,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5747,6 +6092,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5763,6 +6109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5784,6 +6131,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5800,6 +6148,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, 
m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5821,6 +6170,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5837,6 +6187,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5858,6 +6209,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5874,6 +6226,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -5888,6 +6241,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -5904,6 +6258,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -5918,6 +6273,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -5934,6 +6290,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -5948,6 +6305,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -5964,6 +6322,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -5978,6 +6337,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v 
v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -5994,6 +6354,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6009,6 +6370,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6025,6 +6387,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6041,6 +6404,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6057,6 +6421,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6072,6 +6437,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6088,6 +6454,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6103,6 +6470,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6119,6 +6487,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6136,6 +6505,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6152,6 +6522,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: 
vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6169,6 +6540,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6185,6 +6557,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6201,6 +6574,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6217,6 +6591,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6233,6 +6608,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6249,6 +6625,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6267,6 +6644,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6283,6 +6661,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6301,6 +6680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6317,6 +6697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6334,6 +6715,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6350,6 +6732,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6367,6 +6750,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6383,6 +6767,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6402,6 +6787,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6418,6 +6804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6437,6 +6824,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6453,6 +6841,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6471,6 +6860,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6487,6 +6877,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6506,6 +6897,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6522,6 +6914,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: 
vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6542,6 +6935,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6558,6 +6952,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6578,6 +6973,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6594,6 +6990,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6613,6 +7010,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6629,6 +7027,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6649,6 +7048,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6665,6 +7065,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6686,6 +7087,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6702,6 +7104,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6723,6 +7126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, 
e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6739,6 +7143,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6759,6 +7164,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6775,6 +7181,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6796,6 +7203,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6812,6 +7220,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32(i64* %base, %index, i64 %vl) @@ -6826,6 +7235,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -6842,6 +7252,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8(i64* %base, %index, i64 %vl) @@ -6856,6 +7267,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -6872,6 +7284,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64(i64* %base, %index, i64 %vl) @@ -6886,6 +7299,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -6902,6 +7316,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16(i64* %base, %index, i64 %vl) @@ -6916,6 +7331,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -6932,6 +7348,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -6946,6 +7363,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -6962,6 +7380,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -6976,6 +7395,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -6992,6 +7412,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7006,6 +7427,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7022,6 +7444,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7036,6 +7459,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7052,6 +7476,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7067,6 +7492,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7083,6 +7509,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7099,6 +7526,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7115,6 +7543,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7130,6 +7559,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7146,6 +7576,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7162,6 +7593,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7178,6 +7610,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7195,6 +7628,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7211,6 +7645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7228,6 +7663,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, 
%mask, i64 %vl, i64 1) @@ -7244,6 +7680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7260,6 +7697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7276,6 +7714,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7293,6 +7732,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7309,6 +7749,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7327,6 +7768,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7343,6 +7785,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7361,6 +7804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7377,6 +7821,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7394,6 +7839,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7410,6 +7856,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16* %base, %index, i64 
%vl) @@ -7428,6 +7875,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7444,6 +7892,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7463,6 +7912,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7479,6 +7929,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7498,6 +7949,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7514,6 +7966,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7533,6 +7986,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7549,6 +8003,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7568,6 +8023,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7584,6 +8040,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7604,6 +8061,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* 
%base, %index, %mask, i64 %vl, i64 1) @@ -7620,6 +8078,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7640,6 +8099,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7656,6 +8116,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7676,6 +8137,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7692,6 +8154,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7712,6 +8175,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7728,6 +8192,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7749,6 +8214,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7765,6 +8231,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7786,6 +8253,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7802,6 +8270,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7823,6 +8292,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7839,6 +8309,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7860,6 +8331,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7876,6 +8348,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -7890,6 +8363,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -7906,6 +8380,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -7920,6 +8395,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -7936,6 +8412,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -7950,6 +8427,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -7966,6 +8444,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -7980,6 +8459,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -7996,6 +8476,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8012,6 +8493,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8028,6 +8510,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8044,6 +8527,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8060,6 +8544,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8076,6 +8561,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8092,6 +8578,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8108,6 +8595,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8124,6 +8612,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8141,6 +8630,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8157,6 +8647,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8174,6 +8665,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; 
CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8190,6 +8682,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8207,6 +8700,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8223,6 +8717,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8240,6 +8735,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8256,6 +8752,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8274,6 +8771,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8290,6 +8788,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8308,6 +8807,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8324,6 +8824,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8342,6 +8843,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8358,6 +8860,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, 
v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8376,6 +8879,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8392,6 +8896,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8411,6 +8916,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8427,6 +8933,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8446,6 +8953,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8462,6 +8970,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8481,6 +8990,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8497,6 +9007,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8516,6 +9027,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8532,6 +9044,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8552,6 +9065,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8568,6 +9082,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8588,6 +9103,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8604,6 +9120,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8624,6 +9141,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8640,6 +9158,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8660,6 +9179,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8676,6 +9196,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8697,6 +9218,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8713,6 +9235,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8734,6 +9257,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8750,6 +9274,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: 
vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8771,6 +9296,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8787,6 +9313,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8808,6 +9335,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8824,6 +9352,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -8838,6 +9367,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8854,6 +9384,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -8868,6 +9399,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8884,6 +9416,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -8898,6 +9431,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8914,6 +9448,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -8928,6 +9463,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8944,6 +9480,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -8960,6 +9497,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8976,6 +9514,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -8992,6 +9531,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9008,6 +9548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9024,6 +9565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9040,6 +9582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9055,6 +9598,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9071,6 +9615,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9088,6 +9633,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9104,6 +9650,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9121,6 +9668,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9137,6 +9685,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9154,6 +9703,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9170,6 +9720,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9187,6 +9738,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9203,6 +9755,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9221,6 +9774,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9237,6 +9791,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9255,6 +9810,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9271,6 +9827,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9289,6 +9846,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ 
-9305,6 +9863,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9323,6 +9882,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9339,6 +9899,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9358,6 +9919,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9374,6 +9936,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9393,6 +9956,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9409,6 +9973,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9428,6 +9993,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9444,6 +10010,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9463,6 +10030,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9479,6 +10047,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9499,6 +10068,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, 
ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9515,6 +10085,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9535,6 +10106,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9551,6 +10123,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9571,6 +10144,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9587,6 +10161,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9607,6 +10182,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9623,6 +10199,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9644,6 +10221,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9660,6 +10238,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9681,6 +10260,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, 
i64 %vl, i64 1) @@ -9697,6 +10277,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9718,6 +10299,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9734,6 +10316,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9755,6 +10338,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9771,6 +10355,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32* %base, %index, i64 %vl) @@ -9785,6 +10370,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -9801,6 +10387,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32* %base, %index, i64 %vl) @@ -9815,6 +10402,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -9831,6 +10419,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64(i32* %base, %index, i64 %vl) @@ -9845,6 +10434,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -9861,6 +10451,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32* 
%base, %index, i64 %vl) @@ -9875,6 +10466,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -9891,6 +10483,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8* %base, %index, i64 %vl) @@ -9905,6 +10498,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9921,6 +10515,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8* %base, %index, i64 %vl) @@ -9935,6 +10530,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9951,6 +10547,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -9965,6 +10562,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -9981,6 +10579,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -9995,6 +10594,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10011,6 +10611,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10025,6 +10626,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16( 
%val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10041,6 +10643,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10055,6 +10658,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10071,6 +10675,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10087,6 +10692,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10103,6 +10709,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10119,6 +10726,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10135,6 +10743,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10151,6 +10760,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10167,6 +10777,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10182,6 +10793,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10198,6 +10810,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10215,6 +10828,7 
@@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10231,6 +10845,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10248,6 +10863,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10264,6 +10880,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10281,6 +10898,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10297,6 +10915,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10314,6 +10933,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10330,6 +10950,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10348,6 +10969,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10364,6 +10986,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10382,6 +11005,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10398,6 +11022,7 @@ ; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10416,6 +11041,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10432,6 +11058,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10450,6 +11077,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10466,6 +11094,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10485,6 +11114,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10501,6 +11131,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10520,6 +11151,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10536,6 +11168,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10555,6 +11188,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10571,6 +11205,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10590,6 +11225,7 @@ ; CHECK-NEXT: vsetvli zero, a1, 
e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10606,6 +11242,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10626,6 +11263,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10642,6 +11280,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10662,6 +11301,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10678,6 +11318,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10698,6 +11339,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10714,6 +11356,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10734,6 +11377,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10750,6 +11394,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10771,6 +11416,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, 
%val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10787,6 +11433,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10808,6 +11455,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10824,6 +11472,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10845,6 +11494,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10861,6 +11511,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10882,6 +11533,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10898,6 +11550,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -10912,6 +11565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -10928,6 +11582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -10942,6 +11597,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -10958,6 +11614,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -10972,6 +11629,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -10988,6 +11646,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11002,6 +11661,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11018,6 +11678,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -11034,6 +11695,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11050,6 +11712,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -11066,6 +11729,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11082,6 +11746,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11098,6 +11763,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11114,6 +11780,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11130,6 +11797,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} 
@llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11146,6 +11814,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -11163,6 +11832,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11179,6 +11849,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -11196,6 +11867,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11212,6 +11884,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11229,6 +11902,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11245,6 +11919,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11262,6 +11937,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11278,6 +11954,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half* %base, %index, i64 %vl) @@ -11292,6 +11969,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -11308,6 +11986,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half* %base, %index, i64 %vl) @@ -11322,6 +12001,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -11338,6 +12018,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half* %base, %index, i64 %vl) @@ -11352,6 +12033,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -11368,6 +12050,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double* %base, %index, i64 %vl) @@ -11382,6 +12065,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11398,6 +12082,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double* %base, %index, i64 %vl) @@ -11412,6 +12097,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11428,6 +12114,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64(double* %base, %index, i64 %vl) @@ -11442,6 +12129,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11458,6 +12146,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double* %base, %index, i64 %vl) @@ -11472,6 +12161,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11488,6 +12178,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11502,6 +12193,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11518,6 +12210,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11532,6 +12225,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11548,6 +12242,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11562,6 +12257,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11578,6 +12274,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11592,6 +12289,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11608,6 +12306,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11624,6 +12323,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11640,6 +12340,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11656,6 +12357,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11672,6 +12374,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11688,6 +12391,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11704,6 +12408,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11720,6 +12425,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11736,6 +12442,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11753,6 +12460,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11769,6 +12477,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11786,6 +12495,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11802,6 +12512,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11819,6 +12530,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11835,6 +12547,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11852,6 +12565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11868,6 +12582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11886,6 +12601,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11902,6 +12618,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11920,6 +12637,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11936,6 +12654,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11954,6 +12673,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11970,6 +12690,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11988,6 +12709,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12004,6 +12726,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail 
call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12023,6 +12746,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12039,6 +12763,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12058,6 +12783,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12074,6 +12800,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12093,6 +12820,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12109,6 +12837,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12128,6 +12857,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12144,6 +12874,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12164,6 +12895,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12180,6 +12912,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12200,6 +12933,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12216,6 +12950,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12236,6 +12971,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12252,6 +12988,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12272,6 +13009,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12288,6 +13026,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12309,6 +13048,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12325,6 +13065,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12346,6 +13087,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12362,6 +13104,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12383,6 +13126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12399,6 +13143,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12420,6 +13165,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12436,6 +13182,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12450,6 +13197,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12466,6 +13214,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12480,6 +13229,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12496,6 +13246,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12510,6 +13261,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12526,6 +13278,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12540,6 +13293,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12556,6 +13310,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12572,6 +13327,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12588,6 +13344,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12604,6 +13361,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12620,6 +13378,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12636,6 +13395,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12652,6 +13412,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12667,6 +13428,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12683,6 +13445,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12700,6 +13463,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12716,6 +13480,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12733,6 +13498,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12749,6 +13515,7 @@ ; CHECK-NEXT: vsetvli zero, a1, 
e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12766,6 +13533,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12782,6 +13550,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12799,6 +13568,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12815,6 +13585,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12833,6 +13604,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12849,6 +13621,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12867,6 +13640,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12883,6 +13657,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12901,6 +13676,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12917,6 +13693,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12935,6 +13712,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, 
(a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12951,6 +13729,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12970,6 +13749,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12986,6 +13766,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13005,6 +13786,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13021,6 +13803,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13040,6 +13823,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13056,6 +13840,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13075,6 +13860,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13091,6 +13877,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13111,6 +13898,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13127,6 +13915,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13147,6 +13936,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13163,6 +13953,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13183,6 +13974,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13199,6 +13991,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13219,6 +14012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13235,6 +14029,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13256,6 +14051,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13272,6 +14068,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13293,6 +14090,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13309,6 +14107,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13330,6 +14129,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13346,6 +14146,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13367,6 +14168,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13383,6 +14185,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13397,6 +14200,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13413,6 +14217,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13427,6 +14232,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13443,6 +14249,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13457,6 +14264,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13473,6 +14281,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13487,6 +14296,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13503,6 +14313,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13519,6 +14330,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13535,6 +14347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13551,6 +14364,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13567,6 +14381,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13583,6 +14398,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13599,6 +14415,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13615,6 +14432,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13631,6 +14449,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13648,6 +14467,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13664,6 +14484,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13681,6 +14502,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13697,6 +14519,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13714,6 +14537,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13730,6 +14554,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13747,6 +14572,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13763,6 +14589,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13781,6 +14608,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13797,6 +14625,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13815,6 +14644,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13831,6 +14661,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13849,6 +14680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, 
%val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13865,6 +14697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13883,6 +14716,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13899,6 +14733,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13918,6 +14753,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13934,6 +14770,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13953,6 +14790,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13969,6 +14807,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13988,6 +14827,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14004,6 +14844,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -14023,6 +14864,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14039,6 +14881,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -14059,6 +14902,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14075,6 +14919,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -14095,6 +14940,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14111,6 +14957,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -14131,6 +14978,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14147,6 +14995,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -14167,6 +15016,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14183,6 +15033,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -14204,6 +15055,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14220,6 +15072,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -14241,6 +15094,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14257,6 +15111,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -14278,6 +15133,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14294,6 +15150,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -14315,6 +15172,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14331,6 +15189,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14345,6 +15204,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14361,6 +15221,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14375,6 +15236,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14391,6 +15253,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14405,6 +15268,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14421,6 +15285,7 @@ ; CHECK-NEXT: vsetvli zero, a1, 
e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14435,6 +15300,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14451,6 +15317,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14467,6 +15334,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14483,6 +15351,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14499,6 +15368,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14515,6 +15385,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14531,6 +15402,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14547,6 +15419,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14563,6 +15436,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14579,6 +15453,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14596,6 +15471,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v 
v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14612,6 +15488,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14629,6 +15506,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14645,6 +15523,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14662,6 +15541,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14678,6 +15558,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14695,6 +15576,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14711,6 +15593,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14729,6 +15612,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14745,6 +15629,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14763,6 +15648,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14779,6 +15665,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, 
v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14797,6 +15684,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14813,6 +15701,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14831,6 +15720,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14847,6 +15737,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14866,6 +15757,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14882,6 +15774,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14901,6 +15794,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14917,6 +15811,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14936,6 +15831,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14952,6 +15848,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14971,6 +15868,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: 
vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14987,6 +15885,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -15007,6 +15906,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15023,6 +15923,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -15043,6 +15944,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15059,6 +15961,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -15079,6 +15982,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15095,6 +15999,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -15115,6 +16020,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15131,6 +16037,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -15152,6 +16059,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ 
-15168,6 +16076,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -15189,6 +16098,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15205,6 +16115,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -15226,6 +16137,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15242,6 +16154,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -15263,6 +16176,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15279,6 +16193,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15293,6 +16208,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15309,6 +16225,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15323,6 +16240,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15339,6 +16257,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15353,6 +16272,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15369,6 +16289,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15383,6 +16304,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15399,6 +16321,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15415,6 +16338,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15431,6 +16355,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15447,6 +16372,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15463,6 +16389,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15478,6 +16405,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15494,6 +16422,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15509,6 +16438,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: 
%0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15525,6 +16455,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15542,6 +16473,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15558,6 +16490,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15575,6 +16508,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15591,6 +16525,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15607,6 +16542,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15623,6 +16559,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15640,6 +16577,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15656,6 +16594,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float* %base, %index, i64 %vl) @@ -15670,6 +16609,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15686,6 +16626,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float* %base, %index, i64 %vl) @@ -15700,6 +16641,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15716,6 +16658,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64(float* %base, %index, i64 %vl) @@ -15730,6 +16673,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15746,6 +16690,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float* %base, %index, i64 %vl) @@ -15760,6 +16705,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15776,6 +16722,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -15790,6 +16737,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15806,6 +16754,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -15820,6 +16769,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15836,6 +16786,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -15850,6 +16801,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 
killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15866,6 +16818,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -15880,6 +16833,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15896,6 +16850,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -15912,6 +16867,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15928,6 +16884,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -15944,6 +16901,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15960,6 +16918,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -15976,6 +16935,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15992,6 +16952,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -16008,6 +16969,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -16024,6 +16986,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -16041,6 +17004,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -16057,6 +17021,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -16074,6 +17039,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -16090,6 +17056,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -16107,6 +17074,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -16123,6 +17091,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -16140,6 +17109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -16156,6 +17126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16170,6 +17141,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16186,6 +17158,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16200,6 +17173,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16216,6 +17190,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16230,6 +17205,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16246,6 +17222,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16260,6 +17237,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16276,6 +17254,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16291,6 +17270,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16307,6 +17287,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16323,6 +17304,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16339,6 +17321,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16354,6 +17337,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16370,6 +17354,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16386,6 +17371,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16402,6 +17388,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16419,6 +17406,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16435,6 +17423,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16452,6 +17441,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16468,6 +17458,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16484,6 +17475,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16500,6 +17492,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16517,6 +17510,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16533,6 +17527,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16551,6 +17546,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 
= tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16567,6 +17563,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16585,6 +17582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16601,6 +17599,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16618,6 +17617,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16634,6 +17634,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16652,6 +17653,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16668,6 +17670,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16687,6 +17690,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16703,6 +17707,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16722,6 +17727,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16738,6 +17744,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16757,6 +17764,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16773,6 +17781,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16792,6 +17801,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16808,6 +17818,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16828,6 +17839,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16844,6 +17856,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16864,6 +17877,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16880,6 +17894,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16900,6 +17915,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16916,6 +17932,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16936,6 +17953,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; 
CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16952,6 +17970,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16973,6 +17992,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16989,6 +18009,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -17010,6 +18031,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17026,6 +18048,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -17047,6 +18070,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17063,6 +18087,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -17084,6 +18109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17100,6 +18126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17114,6 +18141,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, 
i64 %vl, i64 1) @@ -17130,6 +18158,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17144,6 +18173,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17160,6 +18190,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17174,6 +18205,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17190,6 +18222,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17204,6 +18237,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17220,6 +18254,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17236,6 +18271,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17252,6 +18288,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17268,6 +18305,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17284,6 +18322,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17300,6 +18339,7 @@ ; CHECK-NEXT: vsetvli 
zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17316,6 +18356,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17331,6 +18372,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17347,6 +18389,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17364,6 +18407,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17380,6 +18424,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17397,6 +18442,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17413,6 +18459,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17430,6 +18477,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17446,6 +18494,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17463,6 +18512,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17479,6 +18529,7 @@ ; CHECK-NEXT: vsetvli zero, 
a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17497,6 +18548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17513,6 +18565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17531,6 +18584,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17547,6 +18601,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17565,6 +18620,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17581,6 +18637,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17599,6 +18656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17615,6 +18673,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17634,6 +18693,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17650,6 +18710,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17669,6 +18730,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; 
CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17685,6 +18747,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17704,6 +18767,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17720,6 +18784,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17739,6 +18804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17755,6 +18821,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17775,6 +18842,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17791,6 +18859,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17811,6 +18880,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17827,6 +18897,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17847,6 +18918,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, 
%mask, i64 %vl, i64 1) @@ -17863,6 +18935,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17883,6 +18956,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17899,6 +18973,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17920,6 +18995,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17936,6 +19012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17957,6 +19034,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17973,6 +19051,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17994,6 +19073,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -18010,6 +19090,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -18031,6 +19112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -18047,6 +19129,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18061,6 +19144,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18077,6 +19161,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18091,6 +19176,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18107,6 +19193,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18121,6 +19208,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18137,6 +19225,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) @@ -18151,6 +19240,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18167,6 +19257,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18183,6 +19274,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18199,6 +19291,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18215,6 +19308,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18231,6 +19325,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18246,6 +19341,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18262,6 +19358,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) @@ -18278,6 +19375,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18294,6 +19392,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18311,6 +19410,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18327,6 +19427,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18344,6 +19445,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18360,6 +19462,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18377,6 +19480,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18393,6 +19497,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) @@ -18410,6 +19515,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl) @@ -26,6 +27,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl) @@ -44,6 +46,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl) @@ -59,6 +62,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl) @@ -77,6 +81,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i32 %vl) @@ -93,6 +98,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i32 %vl) @@ -111,6 +117,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i32 %vl) @@ -128,6 +135,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i32 %vl) @@ -146,6 +154,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i32 %vl) @@ -164,6 +173,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i32 %vl) @@ -182,6 +192,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, 
mf8, ta, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i32 %vl) @@ -201,6 +212,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i32 %vl) @@ -219,6 +231,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i32 %vl) @@ -239,6 +252,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i32 %vl) @@ -257,6 +271,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i32 %vl) @@ -278,6 +293,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i32 %vl) @@ -296,6 +312,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg2e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i32 %vl) @@ -311,6 +328,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i32 %vl) @@ -329,6 +347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg3e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i32 %vl) @@ -345,6 +364,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i32 %vl) @@ -363,6 +383,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg4e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i32 %vl) @@ -380,6 +401,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i32 %vl) @@ -398,6 +420,7 @@ ; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl) @@ -413,6 +436,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl) @@ -431,6 +455,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i32 %vl) @@ -447,6 +472,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i32 %vl) @@ -465,6 +491,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i32 %vl) @@ -482,6 +509,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i32 %vl) @@ -500,6 +528,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i32 %vl) @@ -518,6 +547,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i32 %vl) @@ -536,6 +566,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i32 %vl) @@ -555,6 +586,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i32 %vl) @@ -573,6 +605,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i32 %vl) @@ -593,6 +626,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i32 %vl) @@ -611,6 +645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; 
CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i32 %vl) @@ -632,6 +667,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i32 %vl) @@ -650,6 +686,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i32 %vl) @@ -665,6 +702,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i32 %vl) @@ -683,6 +721,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i32 %vl) @@ -699,6 +738,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i32 %vl) @@ -717,6 +757,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i32 %vl) @@ -734,6 +775,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i32 %vl) @@ -752,6 +794,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i32 %vl) @@ -770,6 +813,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i32 %vl) @@ -788,6 +832,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i32 %vl) @@ -807,6 +852,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i32 %vl) @@ -825,6 +871,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i32 %vl) @@ -845,6 +892,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i32 %vl) @@ -863,6 +911,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i32 %vl) @@ -884,6 +933,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i32 %vl) @@ -902,6 +952,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl) @@ -917,6 +968,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl) @@ -935,6 +987,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i32 %vl) @@ -951,6 +1004,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i32 %vl) @@ -969,6 +1023,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i32 %vl) @@ -986,6 +1041,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i32 %vl) @@ -1004,6 +1060,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i32 %vl) @@ -1022,6 +1079,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i32 %vl) @@ -1040,6 +1098,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i32 %vl) @@ -1059,6 +1118,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i32 %vl) @@ -1077,6 +1137,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i32 %vl) @@ -1097,6 +1158,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i32 %vl) @@ -1115,6 +1177,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i32 %vl) @@ -1136,6 +1199,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i32 %vl) @@ -1154,6 +1218,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i32 %vl) @@ -1169,6 +1234,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i32 %vl) @@ -1187,6 +1253,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i32 %vl) @@ -1203,6 +1270,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i32 %vl) @@ -1221,6 +1289,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i32 %vl) @@ -1238,6 +1307,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i32 %vl) @@ -1256,6 +1326,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i32 %vl) @@ -1271,6 +1342,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i32 %vl) @@ -1289,6 +1361,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i32 %vl) @@ -1305,6 +1378,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i32 %vl) @@ -1323,6 +1397,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i32 %vl) @@ -1340,6 +1415,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i32 %vl) @@ -1358,6 +1434,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i32 %vl) @@ -1376,6 +1453,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i32 %vl) @@ -1394,6 +1472,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i32 %vl) @@ -1413,6 +1492,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i32 %vl) @@ -1431,6 +1511,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i32 %vl) @@ -1451,6 +1532,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i32 %vl) @@ -1469,6 +1551,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: 
vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i32 %vl) @@ -1490,6 +1573,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i32 %vl) @@ -1508,6 +1592,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl) @@ -1523,6 +1608,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl) @@ -1541,6 +1627,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i32 %vl) @@ -1556,6 +1643,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i32 %vl) @@ -1574,6 +1662,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i32 %vl) @@ -1590,6 +1679,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i32 %vl) @@ -1608,6 +1698,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i32 %vl) @@ -1625,6 +1716,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i32 %vl) @@ -1643,6 +1735,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i32 %vl) @@ -1661,6 +1754,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i32 %vl) @@ -1679,6 +1773,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i32 %vl) @@ -1698,6 +1793,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i32 %vl) @@ -1716,6 +1812,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i32 %vl) @@ -1736,6 +1833,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i32 %vl) @@ -1754,6 +1852,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i32 %vl) @@ -1775,6 +1874,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i32 %vl) @@ -1793,6 +1893,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i32 %vl) @@ -1808,6 +1909,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i32 %vl) @@ -1826,6 +1928,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i32 %vl) @@ -1842,6 +1945,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i32 %vl) @@ -1860,6 +1964,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i32 %vl) @@ -1877,6 +1982,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i32 %vl) @@ -1895,6 +2001,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i32 %vl) @@ -1913,6 +2020,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i32 %vl) @@ -1931,6 +2039,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i32 %vl) @@ -1950,6 +2059,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i32 %vl) @@ -1968,6 +2078,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i32 %vl) @@ -1988,6 +2099,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i32 %vl) @@ -2006,6 +2118,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i32 %vl) @@ -2027,6 +2140,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i32 %vl) @@ -2045,6 +2159,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vlseg2e8.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i32 %vl) @@ -2060,6 +2175,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i32 %vl) @@ -2078,6 +2194,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i32 %vl) @@ -2093,6 +2210,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i32 %vl) @@ -2111,6 +2229,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: 
def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i32 %vl) @@ -2127,6 +2246,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i32 %vl) @@ -2145,6 +2265,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i32 %vl) @@ -2162,6 +2283,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i32 %vl) @@ -2180,6 +2302,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i32 %vl) @@ -2198,6 +2321,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i32 %vl) @@ -2216,6 +2340,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i32 %vl) @@ -2235,6 +2360,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i32 %vl) @@ -2253,6 +2379,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i32 %vl) @@ -2273,6 +2400,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i32 %vl) @@ -2291,6 +2419,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i32 %vl) @@ -2312,6 +2441,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i32 %vl) @@ -2330,6 +2460,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # 
kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i32 %vl) @@ -2345,6 +2476,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i32 %vl) @@ -2363,6 +2495,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i32 %vl) @@ -2379,6 +2512,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i32 %vl) @@ -2397,6 +2531,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i32 %vl) @@ -2414,6 +2549,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i32 %vl) @@ -2432,6 +2568,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i32 %vl) @@ -2450,6 +2587,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i32 %vl) @@ -2468,6 +2606,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i32 %vl) @@ -2487,6 +2626,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i32 %vl) @@ -2505,6 +2645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i32 %vl) @@ -2525,6 +2666,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i32 %vl) @@ -2543,6 +2685,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 
killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i32 %vl) @@ -2564,6 +2707,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i32 %vl) @@ -2582,6 +2726,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl) @@ -2597,6 +2742,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl) @@ -2615,6 +2761,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl) @@ -2631,6 +2778,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl) @@ -2649,6 +2797,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i32 %vl) @@ -2666,6 +2815,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i32 %vl) @@ -2684,6 +2834,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i32 %vl) @@ -2699,6 +2850,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i32 %vl) @@ -2717,6 +2869,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i32 %vl) @@ -2732,6 +2885,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i32 %vl) @@ -2750,6 +2904,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64.v v7, (a0) ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i32 %vl) @@ -2765,6 +2920,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i32 %vl) @@ -2783,6 +2939,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg3e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i32 %vl) @@ -2799,6 +2956,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i32 %vl) @@ -2817,6 +2975,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg4e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i32 %vl) @@ -2834,6 +2993,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i32 %vl) @@ -2852,6 +3012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg5e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i32 %vl) @@ -2870,6 +3031,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i32 %vl) @@ -2888,6 +3050,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg6e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i32 %vl) @@ -2907,6 +3070,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i32 %vl) @@ -2925,6 +3089,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg7e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i32 %vl) @@ -2945,6 +3110,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i32 %vl) @@ -2963,6 +3129,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: 
vlseg8e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i32 %vl) @@ -2984,6 +3151,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i32 %vl) @@ -3002,6 +3170,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i32 %vl) @@ -3017,6 +3186,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i32 %vl) @@ -3035,6 +3205,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i32 %vl) @@ -3051,6 +3222,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i32 %vl) @@ -3069,6 +3241,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i32 %vl) @@ -3086,6 +3259,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i32 %vl) @@ -3104,6 +3278,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i32 %vl) @@ -3122,6 +3297,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i32 %vl) @@ -3140,6 +3316,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i32 %vl) @@ -3159,6 +3336,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i32 %vl) @@ -3177,6 +3355,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, 
mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i32 %vl) @@ -3197,6 +3376,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i32 %vl) @@ -3215,6 +3395,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i32 %vl) @@ -3236,6 +3417,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i32 %vl) @@ -3254,6 +3436,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i32 %vl) @@ -3269,6 +3452,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i32 %vl) @@ -3287,6 +3471,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i32 %vl) @@ -3303,6 +3488,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i32 %vl) @@ -3321,6 +3507,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i32 %vl) @@ -3338,6 +3525,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i32 %vl) @@ -3356,6 +3544,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i32 %vl) @@ -3374,6 +3563,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i32 %vl) @@ -3392,6 +3582,7 @@ ; CHECK-NEXT: vsetvli 
zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i32 %vl) @@ -3411,6 +3602,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i32 %vl) @@ -3429,6 +3621,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i32 %vl) @@ -3449,6 +3642,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i32 %vl) @@ -3467,6 +3661,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i32 %vl) @@ -3488,6 +3683,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i32 %vl) @@ -3506,6 +3702,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i32 %vl) @@ -3521,6 +3718,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i32 %vl) @@ -3539,6 +3737,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i32 %vl) @@ -3555,6 +3754,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i32 %vl) @@ -3573,6 +3773,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i32 %vl) @@ -3590,6 +3791,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i32 %vl) @@ -3608,6 
+3810,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i32 %vl) @@ -3626,6 +3829,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i32 %vl) @@ -3644,6 +3848,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i32 %vl) @@ -3663,6 +3868,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i32 %vl) @@ -3681,6 +3887,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i32 %vl) @@ -3701,6 +3908,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i32 %vl) @@ -3719,6 +3927,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i32 %vl) @@ -3740,6 +3949,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i32 %vl) @@ -3758,6 +3968,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i32 %vl) @@ -3773,6 +3984,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i32 %vl) @@ -3791,6 +4003,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i32 %vl) @@ -3807,6 +4020,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i32 %vl) @@ -3825,6 +4039,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i32 %vl) @@ -3842,6 +4057,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i32 %vl) @@ -3860,6 +4076,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i32 %vl) @@ -3875,6 +4092,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i32 %vl) @@ -3893,6 +4111,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i32 %vl) @@ -3908,6 +4127,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i32 %vl) @@ -3926,6 +4146,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i32 %vl) @@ -3942,6 +4163,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i32 %vl) @@ -3960,6 +4182,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg4e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i32 %vl) @@ -3977,6 +4200,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i32 %vl) @@ -3995,6 +4219,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i32 %vl) @@ -4010,6 +4235,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i32 %vl) @@ -4028,6 +4254,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i32 %vl) @@ -4044,6 +4271,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i32 %vl) @@ -4062,6 +4290,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i32 %vl) @@ -4079,6 +4308,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i32 %vl) @@ -4097,6 +4327,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i32 %vl) @@ -4115,6 +4346,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i32 %vl) @@ -4133,6 +4365,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i32 %vl) @@ -4152,6 +4385,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i32 %vl) @@ -4170,6 +4404,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i32 %vl) @@ -4190,6 +4425,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i32 %vl) @@ -4208,6 +4444,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i32 %vl) @@ -4229,6 +4466,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i32 %vl) @@ -4247,6 +4485,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i32 %vl) @@ -4262,6 +4501,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i32 %vl) @@ -4280,6 +4520,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16(half* %base, i32 %vl) @@ -4296,6 +4537,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16(half* %base, i32 %vl) @@ -4314,6 +4556,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i32 %vl) @@ -4331,6 +4574,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i32 %vl) @@ -4349,6 +4593,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i32 %vl) @@ -4367,6 +4612,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i32 %vl) @@ -4385,6 +4631,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i32 %vl) @@ -4404,6 +4651,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i32 %vl) @@ -4422,6 +4670,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i32 %vl) @@ -4442,6 +4691,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i32 %vl) @@ -4460,6 +4710,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i32 %vl) @@ -4481,6 +4732,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i32 %vl) @@ -4499,6 +4751,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i32 %vl) @@ -4514,6 +4767,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i32 %vl) @@ -4532,6 +4786,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32(float* %base, i32 %vl) @@ -4548,6 +4803,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32(float* %base, i32 %vl) @@ -4566,6 +4822,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i32 %vl) @@ -4583,6 +4840,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 %vl) @@ -26,6 +27,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 %vl) @@ -44,6 +46,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl) @@ -59,6 +62,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl) @@ -77,6 +81,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl) @@ -93,6 +98,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl) @@ -111,6 +117,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i64 %vl) @@ -128,6 +135,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i64 %vl) @@ -146,6 +154,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg2e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i64 %vl) @@ -161,6 +170,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i64 %vl) @@ -179,6 +189,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg3e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i64 %vl) @@ -195,6 +206,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i64 %vl) @@ -213,6 +225,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vlseg4e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i64 %vl) @@ -230,6 +243,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i64 %vl) @@ -248,6 +262,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i64(i64* %base, i64 %vl) @@ -263,6 +278,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i64(i64* %base, i64 %vl) @@ -281,6 +297,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg3e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i64(i64* %base, i64 %vl) @@ -297,6 +314,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i64(i64* %base, i64 %vl) @@ -315,6 +333,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg4e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i64(i64* %base, i64 %vl) @@ -332,6 +351,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i64(i64* %base, i64 %vl) @@ -350,6 +370,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg5e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i64(i64* %base, i64 %vl) @@ -368,6 +389,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i64(i64* %base, i64 %vl) @@ -386,6 +408,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg6e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i64(i64* %base, i64 %vl) @@ -405,6 +428,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i64(i64* %base, i64 %vl) @@ -423,6 +447,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg7e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(i64* %base, i64 %vl) @@ -443,6 +468,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i64(i64* %base, i64 %vl) @@ -461,6 +487,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg8e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64(i64* %base, i64 %vl) @@ -482,6 +509,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i64(i64* %base, i64 %vl) @@ -500,6 +528,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl) @@ -515,6 +544,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl) @@ -533,6 +563,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i64 %vl) @@ -549,6 +580,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i64 %vl) @@ -567,6 +599,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i64 %vl) @@ -584,6 +617,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i64 %vl) @@ -602,6 +636,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i64 %vl) @@ -620,6 +655,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i64 %vl) @@ -638,6 +674,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i64 %vl) @@ -657,6 +694,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i64 %vl) @@ -675,6 +713,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vlseg7.nxv1i32(i32* %base, i64 %vl) @@ -695,6 +734,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i64 %vl) @@ -713,6 +753,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i64 %vl) @@ -734,6 +775,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i64 %vl) @@ -752,6 +794,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i64 %vl) @@ -767,6 +810,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i64 %vl) @@ -785,6 +829,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i64 %vl) @@ -801,6 +846,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i64 %vl) @@ -819,6 +865,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i64 %vl) @@ -836,6 +883,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i64 %vl) @@ -854,6 +902,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i64 %vl) @@ -869,6 +918,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i64 %vl) @@ -887,6 +937,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} 
@llvm.riscv.vlseg3.nxv4i8(i8* %base, i64 %vl) @@ -903,6 +954,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i64 %vl) @@ -921,6 +973,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i64 %vl) @@ -938,6 +991,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i64 %vl) @@ -956,6 +1010,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i64 %vl) @@ -974,6 +1029,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i64 %vl) @@ -992,6 +1048,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i64 %vl) @@ -1011,6 +1068,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i64 %vl) @@ -1029,6 +1087,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i64 %vl) @@ -1049,6 +1108,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i64 %vl) @@ -1067,6 +1127,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i64 %vl) @@ -1088,6 +1149,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i64 %vl) @@ -1106,6 +1168,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vlseg2.nxv1i16(i16* %base, i64 %vl) @@ -1121,6 +1184,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i64 %vl) @@ -1139,6 +1203,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i64 %vl) @@ -1155,6 +1220,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i64 %vl) @@ -1173,6 +1239,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i64 %vl) @@ -1190,6 +1257,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i64 %vl) @@ -1208,6 +1276,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i64 %vl) @@ -1226,6 +1295,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i64 %vl) @@ -1244,6 +1314,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i64 %vl) @@ -1263,6 +1334,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i64 %vl) @@ -1281,6 +1353,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i64 %vl) @@ -1301,6 +1374,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i64 %vl) @@ -1319,6 +1393,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail 
call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i64 %vl) @@ -1340,6 +1415,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i64 %vl) @@ -1358,6 +1434,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl) @@ -1373,6 +1450,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl) @@ -1391,6 +1469,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i64 %vl) @@ -1407,6 +1486,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i64 %vl) @@ -1425,6 +1505,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i64 %vl) @@ -1442,6 +1523,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i64 %vl) @@ -1460,6 +1542,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i64 %vl) @@ -1478,6 +1561,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i64 %vl) @@ -1496,6 +1580,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i64 %vl) @@ -1515,6 +1600,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i64 %vl) @@ -1533,6 +1619,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vlseg7.nxv2i32(i32* %base, i64 %vl) @@ -1553,6 +1640,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i64 %vl) @@ -1571,6 +1659,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i64 %vl) @@ -1592,6 +1681,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i64 %vl) @@ -1610,6 +1700,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i64 %vl) @@ -1625,6 +1716,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i64 %vl) @@ -1643,6 +1735,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i64 %vl) @@ -1659,6 +1752,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i64 %vl) @@ -1677,6 +1771,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i64 %vl) @@ -1694,6 +1789,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i64 %vl) @@ -1712,6 +1808,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i64 %vl) @@ -1730,6 +1827,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i64 %vl) @@ -1748,6 +1846,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* 
%base, i64 %vl) @@ -1767,6 +1866,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i64 %vl) @@ -1785,6 +1885,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i64 %vl) @@ -1805,6 +1906,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i64 %vl) @@ -1823,6 +1925,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i64 %vl) @@ -1844,6 +1947,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i64 %vl) @@ -1862,6 +1966,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i64(i64* %base, i64 %vl) @@ -1877,6 +1982,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i64(i64* %base, i64 %vl) @@ -1895,6 +2001,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i64 %vl) @@ -1910,6 +2017,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i64 %vl) @@ -1928,6 +2036,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i64 %vl) @@ -1944,6 +2053,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i64 %vl) @@ -1962,6 +2072,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i64 %vl) @@ 
-1979,6 +2090,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i64 %vl) @@ -1997,6 +2109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i64 %vl) @@ -2015,6 +2128,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i64 %vl) @@ -2033,6 +2147,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i64 %vl) @@ -2052,6 +2167,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i64 %vl) @@ -2070,6 +2186,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i64 %vl) @@ -2090,6 +2207,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i64 %vl) @@ -2108,6 +2226,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i64 %vl) @@ -2129,6 +2248,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i64 %vl) @@ -2147,6 +2267,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i64 %vl) @@ -2162,6 +2283,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i64 %vl) @@ -2180,6 +2302,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* 
%base, i64 %vl) @@ -2196,6 +2319,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i64 %vl) @@ -2214,6 +2338,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i64 %vl) @@ -2231,6 +2356,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i64 %vl) @@ -2249,6 +2375,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i64 %vl) @@ -2267,6 +2394,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i64 %vl) @@ -2285,6 +2413,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i64 %vl) @@ -2304,6 +2433,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i64 %vl) @@ -2322,6 +2452,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i64 %vl) @@ -2342,6 +2473,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i64 %vl) @@ -2360,6 +2492,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i64 %vl) @@ -2381,6 +2514,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i64 %vl) @@ -2399,6 +2533,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* 
%base, i64 %vl) @@ -2414,6 +2549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i64 %vl) @@ -2432,6 +2568,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i64 %vl) @@ -2448,6 +2585,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i64 %vl) @@ -2466,6 +2604,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i64 %vl) @@ -2483,6 +2622,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i64 %vl) @@ -2501,6 +2641,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i64 %vl) @@ -2519,6 +2660,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i64 %vl) @@ -2537,6 +2679,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i64 %vl) @@ -2556,6 +2699,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i64 %vl) @@ -2574,6 +2718,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i64 %vl) @@ -2594,6 +2739,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i64 %vl) @@ -2612,6 +2758,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i64 %vl) @@ -2633,6 
+2780,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i64 %vl) @@ -2651,6 +2799,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl) @@ -2666,6 +2815,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl) @@ -2684,6 +2834,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vlseg2e8.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i64 %vl) @@ -2699,6 +2850,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i64 %vl) @@ -2717,6 +2869,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i64 %vl) @@ -2732,6 +2885,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i64 %vl) @@ -2750,6 +2904,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i64 %vl) @@ -2766,6 +2921,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i64 %vl) @@ -2784,6 +2940,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i64 %vl) @@ -2801,6 +2958,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i64 %vl) @@ -2819,6 +2977,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i64 %vl) @@ -2837,6 +2996,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: 
vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i64 %vl) @@ -2855,6 +3015,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i64 %vl) @@ -2874,6 +3035,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i64 %vl) @@ -2892,6 +3054,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i64 %vl) @@ -2912,6 +3075,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i64 %vl) @@ -2930,6 +3094,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i64 %vl) @@ -2951,6 +3116,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i64 %vl) @@ -2969,6 +3135,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i64(i64* %base, i64 %vl) @@ -2984,6 +3151,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2i64(i64* %base, i64 %vl) @@ -3002,6 +3170,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i64(i64* %base, i64 %vl) @@ -3018,6 +3187,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2i64(i64* %base, i64 %vl) @@ -3036,6 +3206,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg4e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i64(i64* %base, i64 
%vl) @@ -3053,6 +3224,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2i64(i64* %base, i64 %vl) @@ -3071,6 +3243,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i64 %vl) @@ -3086,6 +3259,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16f16(half* %base, i64 %vl) @@ -3104,6 +3278,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i64 %vl) @@ -3119,6 +3294,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f64(double* %base, i64 %vl) @@ -3137,6 +3313,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg2e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i64 %vl) @@ -3152,6 +3329,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f64(double* %base, i64 %vl) @@ -3170,6 +3348,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg3e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i64 %vl) @@ -3186,6 +3365,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f64(double* %base, i64 %vl) @@ -3204,6 +3384,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg4e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i64 %vl) @@ -3221,6 +3402,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f64(double* %base, i64 %vl) @@ -3239,6 +3421,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg5e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i64 %vl) @@ -3257,6 +3440,7 @@ ; 
CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f64(double* %base, i64 %vl) @@ -3275,6 +3459,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg6e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i64 %vl) @@ -3294,6 +3479,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f64(double* %base, i64 %vl) @@ -3312,6 +3498,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg7e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i64 %vl) @@ -3332,6 +3519,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f64(double* %base, i64 %vl) @@ -3350,6 +3538,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vlseg8e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i64 %vl) @@ -3371,6 +3560,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f64(double* %base, i64 %vl) @@ -3389,6 +3579,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i64 %vl) @@ -3404,6 +3595,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f32(float* %base, i64 %vl) @@ -3422,6 +3614,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i64 %vl) @@ -3438,6 +3631,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f32(float* %base, i64 %vl) @@ -3456,6 +3650,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vlseg4.nxv2f32(float* %base, i64 %vl) @@ -3473,6 +3668,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f32(float* %base, i64 %vl) @@ -3491,6 +3687,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i64 %vl) @@ -3509,6 +3706,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f32(float* %base, i64 %vl) @@ -3527,6 +3725,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i64 %vl) @@ -3546,6 +3745,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f32(float* %base, i64 %vl) @@ -3564,6 +3764,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i64 %vl) @@ -3584,6 +3785,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f32(float* %base, i64 %vl) @@ -3602,6 +3804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i64 %vl) @@ -3623,6 +3826,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f32(float* %base, i64 %vl) @@ -3641,6 +3845,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i64 %vl) @@ -3656,6 +3861,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f16(half* %base, i64 %vl) @@ -3674,6 +3880,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i64 %vl) @@ -3690,6 +3897,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f16(half* %base, i64 %vl) @@ -3708,6 +3916,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i64 %vl) @@ -3725,6 +3934,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f16(half* %base, i64 %vl) @@ -3743,6 +3953,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i64 %vl) @@ -3761,6 +3972,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f16(half* %base, i64 %vl) @@ -3779,6 +3991,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i64 %vl) @@ -3798,6 +4011,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f16(half* %base, i64 %vl) @@ -3816,6 +4030,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i64 %vl) @@ -3836,6 +4051,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f16(half* %base, i64 %vl) @@ -3854,6 +4070,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i64 %vl) @@ -3875,6 +4092,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f16(half* %base, i64 %vl) @@ -3893,6 +4111,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # 
kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i64 %vl) @@ -3908,6 +4127,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv1f32(float* %base, i64 %vl) @@ -3926,6 +4146,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i64 %vl) @@ -3942,6 +4163,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv1f32(float* %base, i64 %vl) @@ -3960,6 +4182,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i64 %vl) @@ -3977,6 +4200,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv1f32(float* %base, i64 %vl) @@ -3995,6 +4219,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i64 %vl) @@ -4013,6 +4238,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv1f32(float* %base, i64 %vl) @@ -4031,6 +4257,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i64 %vl) @@ -4050,6 +4277,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv1f32(float* %base, i64 %vl) @@ -4068,6 +4296,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i64 %vl) @@ -4088,6 +4317,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv1f32(float* %base, i64 %vl) @@ -4106,6 +4336,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i64 %vl) @@ -4127,6 +4358,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv1f32(float* %base, i64 %vl) @@ -4145,6 +4377,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i64 %vl) @@ -4160,6 +4393,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f16(half* %base, i64 %vl) @@ -4178,6 +4412,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i64 %vl) @@ -4194,6 +4429,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv8f16(half* %base, i64 %vl) @@ -4212,6 +4448,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i64 %vl) @@ -4229,6 +4466,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv8f16(half* %base, i64 %vl) @@ -4247,6 +4485,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i64 %vl) @@ -4262,6 +4501,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv8f32(float* %base, i64 %vl) @@ -4280,6 +4520,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i64 %vl) @@ -4295,6 +4536,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f64(double* %base, i64 %vl) @@ -4313,6 +4555,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, 
mu ; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i64 %vl) @@ -4329,6 +4572,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f64(double* %base, i64 %vl) @@ -4347,6 +4591,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vlseg4e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i64 %vl) @@ -4364,6 +4609,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f64(double* %base, i64 %vl) @@ -4382,6 +4628,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i64 %vl) @@ -4397,6 +4644,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f16(half* %base, i64 %vl) @@ -4415,6 +4663,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i64 %vl) @@ -4431,6 +4680,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f16(half* %base, i64 %vl) @@ -4449,6 +4699,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i64 %vl) @@ -4466,6 +4717,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f16(half* %base, i64 %vl) @@ -4484,6 +4736,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i64 %vl) @@ -4502,6 +4755,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv4f16(half* %base, i64 %vl) @@ -4520,6 +4774,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; 
CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i64 %vl) @@ -4539,6 +4794,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv4f16(half* %base, i64 %vl) @@ -4557,6 +4813,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i64 %vl) @@ -4577,6 +4834,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv4f16(half* %base, i64 %vl) @@ -4595,6 +4853,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i64 %vl) @@ -4616,6 +4875,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv4f16(half* %base, i64 %vl) @@ -4634,6 +4894,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i64 %vl) @@ -4649,6 +4910,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv2f16(half* %base, i64 %vl) @@ -4667,6 +4929,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16(half* %base, i64 %vl) @@ -4683,6 +4946,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv2f16(half* %base, i64 %vl) @@ -4701,6 +4965,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i64 %vl) @@ -4718,6 +4983,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv2f16(half* %base, i64 %vl) @@ -4736,6 +5002,7 @@ ; CHECK-NEXT: vsetvli 
zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i64 %vl) @@ -4754,6 +5021,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlseg5.nxv2f16(half* %base, i64 %vl) @@ -4772,6 +5040,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i64 %vl) @@ -4791,6 +5060,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlseg6.nxv2f16(half* %base, i64 %vl) @@ -4809,6 +5079,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i64 %vl) @@ -4829,6 +5100,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlseg7.nxv2f16(half* %base, i64 %vl) @@ -4847,6 +5119,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i64 %vl) @@ -4868,6 +5141,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlseg8.nxv2f16(half* %base, i64 %vl) @@ -4886,6 +5160,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i64 %vl) @@ -4901,6 +5176,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv4f32(float* %base, i64 %vl) @@ -4919,6 +5195,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlseg3.nxv4f32(float* %base, i64 %vl) @@ -4935,6 +5212,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} 
@llvm.riscv.vlseg3.nxv4f32(float* %base, i64 %vl) @@ -4953,6 +5231,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i64 %vl) @@ -4970,6 +5249,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlseg4.nxv4f32(float* %base, i64 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vlseg2e16ff.v v0, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) @@ -29,6 +30,7 @@ ; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -43,6 +45,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) @@ -57,6 +60,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -69,6 +73,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v0, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) @@ -82,6 +87,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl) @@ -31,6 +32,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -51,6 +53,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* %base, i32 %vl) @@ -69,6 +72,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -89,6 +93,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i8(i8* %base, i32 %vl) @@ -108,6 +113,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8( %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -128,6 +134,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i8(i8* %base, i32 %vl) @@ -148,6 +155,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -168,6 +176,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i8(i8* %base, i32 %vl) @@ -189,6 +198,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -209,6 +219,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i8(i8* %base, i32 %vl) @@ -231,6 +242,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -251,6 +263,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i8(i8* %base, i32 %vl) @@ -274,6 +287,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -294,6 +308,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i8(i8* %base, i32 %vl) @@ -318,6 +333,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -338,6 +354,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i32 %vl) @@ -356,6 +373,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -376,6 +394,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv16i8(i8* %base, i32 %vl) @@ -395,6 +414,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8( %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -415,6 +435,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv16i8(i8* %base, i32 %vl) @@ -435,6 +456,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -455,6 +477,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i32 %vl) @@ -473,6 +496,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -493,6 +517,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i32(i32* %base, i32 %vl) @@ -512,6 +537,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32( %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -532,6 +558,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed 
$v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i32(i32* %base, i32 %vl) @@ -552,6 +579,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -572,6 +600,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i32(i32* %base, i32 %vl) @@ -593,6 +622,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -613,6 +643,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i32(i32* %base, i32 %vl) @@ -635,6 +666,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -655,6 +687,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i32(i32* %base, i32 %vl) @@ -678,6 +711,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -698,6 +732,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i32(i32* %base, i32 %vl) @@ -722,6 +757,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -742,6 +778,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i32 %vl) @@ -760,6 +797,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl, 
i32 1) @@ -780,6 +818,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i16(i16* %base, i32 %vl) @@ -799,6 +838,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16( %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -819,6 +859,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i16(i16* %base, i32 %vl) @@ -839,6 +880,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -859,6 +901,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i16(i16* %base, i32 %vl) @@ -880,6 +923,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -900,6 +944,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i16(i16* %base, i32 %vl) @@ -922,6 +967,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -942,6 +988,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i16(i16* %base, i32 %vl) @@ -965,6 +1012,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -985,6 +1033,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i16(i16* %base, i32 %vl) @@ -1009,6 +1058,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -1029,6 +1079,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i32(i32* %base, i32 %vl) @@ -1047,6 +1098,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -1067,6 +1119,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i32(i32* %base, i32 %vl) @@ -1086,6 +1139,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32( %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -1106,6 +1160,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i32(i32* %base, i32 %vl) @@ -1126,6 +1181,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -1146,6 +1202,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i32(i32* %base, i32 %vl) @@ -1167,6 +1224,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -1187,6 +1245,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i32(i32* %base, i32 %vl) @@ -1209,6 +1268,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -1229,6 +1289,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i32(i32* %base, i32 %vl) @@ -1252,6 +1313,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -1272,6 +1334,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i32(i32* %base, i32 %vl) @@ -1296,6 +1359,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -1316,6 +1380,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i32 %vl) @@ -1334,6 +1399,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -1354,6 +1420,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8i16(i16* %base, i32 %vl) @@ -1373,6 +1440,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16( %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -1393,6 +1461,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i16(i16* %base, i32 %vl) @@ -1413,6 +1482,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -1433,6 +1503,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i32 %vl) @@ -1451,6 +1522,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1471,6 +1543,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8i8(i8* %base, i32 %vl) @@ -1490,6 +1563,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw 
a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8( %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1510,6 +1584,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8i8(i8* %base, i32 %vl) @@ -1530,6 +1605,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1550,6 +1626,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv8i8(i8* %base, i32 %vl) @@ -1571,6 +1648,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1591,6 +1669,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv8i8(i8* %base, i32 %vl) @@ -1613,6 +1692,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1633,6 +1713,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv8i8(i8* %base, i32 %vl) @@ -1656,6 +1737,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1676,6 +1758,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv8i8(i8* %base, i32 %vl) @@ -1700,6 +1783,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1720,6 +1804,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 
= tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8i32(i32* %base, i32 %vl) @@ -1738,6 +1823,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -1758,6 +1844,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i8(i8* %base, i32 %vl) @@ -1776,6 +1863,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1796,6 +1884,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i8(i8* %base, i32 %vl) @@ -1815,6 +1904,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8( %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1835,6 +1925,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i8(i8* %base, i32 %vl) @@ -1855,6 +1946,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1875,6 +1967,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4i8(i8* %base, i32 %vl) @@ -1896,6 +1989,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1916,6 +2010,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4i8(i8* %base, i32 %vl) @@ -1938,6 +2033,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -1958,6 +2054,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call 
{,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4i8(i8* %base, i32 %vl) @@ -1981,6 +2078,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -2001,6 +2099,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4i8(i8* %base, i32 %vl) @@ -2025,6 +2124,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -2045,6 +2145,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1i16(i16* %base, i32 %vl) @@ -2063,6 +2164,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2083,6 +2185,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1i16(i16* %base, i32 %vl) @@ -2102,6 +2205,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16( %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2122,6 +2226,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1i16(i16* %base, i32 %vl) @@ -2142,6 +2247,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2162,6 +2268,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1i16(i16* %base, i32 %vl) @@ -2183,6 +2290,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2203,6 +2311,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1i16(i16* %base, i32 %vl) @@ -2225,6 +2334,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2245,6 +2355,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1i16(i16* %base, i32 %vl) @@ -2268,6 +2379,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2288,6 +2400,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1i16(i16* %base, i32 %vl) @@ -2312,6 +2425,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2332,6 +2446,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i32 %vl) @@ -2350,6 +2465,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -2370,6 +2486,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i8(i8* %base, i32 %vl) @@ -2388,6 +2505,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8( %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -2408,6 +2526,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i8(i8* %base, i32 %vl) @@ -2427,6 +2546,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8( %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -2447,6 +2567,7 @@ ; CHECK-NEXT: 
csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i8(i8* %base, i32 %vl) @@ -2467,6 +2588,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -2487,6 +2609,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i8(i8* %base, i32 %vl) @@ -2508,6 +2631,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -2528,6 +2652,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i8(i8* %base, i32 %vl) @@ -2550,6 +2675,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -2570,6 +2696,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i8(i8* %base, i32 %vl) @@ -2593,6 +2720,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -2613,6 +2741,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i8(i8* %base, i32 %vl) @@ -2637,6 +2766,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl, i32 1) @@ -2657,6 +2787,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2i16(i16* %base, i32 %vl) @@ -2675,6 +2806,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, 
i32} @llvm.riscv.vlseg2ff.mask.nxv2i16( %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2695,6 +2827,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2i16(i16* %base, i32 %vl) @@ -2714,6 +2847,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16( %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2734,6 +2868,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2i16(i16* %base, i32 %vl) @@ -2754,6 +2889,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2774,6 +2910,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2i16(i16* %base, i32 %vl) @@ -2795,6 +2932,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2815,6 +2953,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2i16(i16* %base, i32 %vl) @@ -2837,6 +2976,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2857,6 +2997,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2i16(i16* %base, i32 %vl) @@ -2880,6 +3021,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2900,6 +3042,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2i16(i16* %base, i32 %vl) @@ -2924,6 +3067,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl, i32 1) @@ -2944,6 +3088,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i32 %vl) @@ -2962,6 +3107,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -2982,6 +3128,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4i32(i32* %base, i32 %vl) @@ -3001,6 +3148,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32( %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -3021,6 +3169,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4i32(i32* %base, i32 %vl) @@ -3041,6 +3190,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl, i32 1) @@ -3061,6 +3211,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv16f16(half* %base, i32 %vl) @@ -3079,6 +3230,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16( %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -3099,6 +3251,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f64(double* %base, i32 %vl) @@ -3117,6 +3270,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64( %val, %val, double* %base, %mask, i32 %vl, i32 1) @@ -3137,6 +3291,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f64(double* %base, i32 %vl) @@ -3155,6 +3310,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) 
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64( %val, %val, double* %base, %mask, i32 %vl, i32 1) @@ -3175,6 +3331,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f64(double* %base, i32 %vl) @@ -3194,6 +3351,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64( %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) @@ -3214,6 +3372,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f64(double* %base, i32 %vl) @@ -3234,6 +3393,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64( %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) @@ -3254,6 +3414,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f64(double* %base, i32 %vl) @@ -3275,6 +3436,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) @@ -3295,6 +3457,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f64(double* %base, i32 %vl) @@ -3317,6 +3480,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) @@ -3337,6 +3501,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f64(double* %base, i32 %vl) @@ -3360,6 +3525,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) @@ -3380,6 +3546,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} 
@llvm.riscv.vlseg8ff.nxv1f64(double* %base, i32 %vl) @@ -3404,6 +3571,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) @@ -3424,6 +3592,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f32(float* %base, i32 %vl) @@ -3442,6 +3611,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32( %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -3462,6 +3632,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f32(float* %base, i32 %vl) @@ -3481,6 +3652,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32( %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -3501,6 +3673,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f32(float* %base, i32 %vl) @@ -3521,6 +3694,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -3541,6 +3715,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f32(float* %base, i32 %vl) @@ -3562,6 +3737,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -3582,6 +3758,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f32(float* %base, i32 %vl) @@ -3604,6 +3781,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -3624,6 +3802,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f32(float* %base, i32 %vl) @@ -3647,6 +3826,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -3667,6 +3847,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f32(float* %base, i32 %vl) @@ -3691,6 +3872,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -3711,6 +3893,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f16(half* %base, i32 %vl) @@ -3729,6 +3912,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16( %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -3749,6 +3933,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f16(half* %base, i32 %vl) @@ -3768,6 +3953,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16( %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -3788,6 +3974,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f16(half* %base, i32 %vl) @@ -3808,6 +3995,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -3828,6 +4016,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f16(half* %base, i32 %vl) @@ -3849,6 +4038,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -3869,6 
+4059,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f16(half* %base, i32 %vl) @@ -3891,6 +4082,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -3911,6 +4103,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f16(half* %base, i32 %vl) @@ -3934,6 +4127,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -3954,6 +4148,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f16(half* %base, i32 %vl) @@ -3978,6 +4173,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -3998,6 +4194,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv1f32(float* %base, i32 %vl) @@ -4016,6 +4213,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32( %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -4036,6 +4234,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv1f32(float* %base, i32 %vl) @@ -4055,6 +4254,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32( %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -4075,6 +4275,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv1f32(float* %base, i32 %vl) @@ -4095,6 +4296,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -4115,6 +4317,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv1f32(float* %base, i32 %vl) @@ -4136,6 +4339,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -4156,6 +4360,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv1f32(float* %base, i32 %vl) @@ -4178,6 +4383,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -4198,6 +4404,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv1f32(float* %base, i32 %vl) @@ -4221,6 +4428,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -4241,6 +4449,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv1f32(float* %base, i32 %vl) @@ -4265,6 +4474,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -4285,6 +4495,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8f16(half* %base, i32 %vl) @@ -4303,6 +4514,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16( %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4323,6 +4535,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv8f16(half* %base, i32 %vl) @@ -4342,6 +4555,7 @@ 
; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16( %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4362,6 +4576,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv8f16(half* %base, i32 %vl) @@ -4382,6 +4597,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4402,6 +4618,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv8f32(float* %base, i32 %vl) @@ -4420,6 +4637,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32( %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -4440,6 +4658,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f64(double* %base, i32 %vl) @@ -4458,6 +4677,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64( %val, %val, double* %base, %mask, i32 %vl, i32 1) @@ -4478,6 +4698,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f64(double* %base, i32 %vl) @@ -4497,6 +4718,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64( %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) @@ -4517,6 +4739,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f64(double* %base, i32 %vl) @@ -4537,6 +4760,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64( %val, %val, %val, %val, double* %base, %mask, i32 %vl, i32 1) @@ -4557,6 +4781,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} 
@llvm.riscv.vlseg2ff.nxv4f16(half* %base, i32 %vl) @@ -4575,6 +4800,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16( %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4595,6 +4821,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4f16(half* %base, i32 %vl) @@ -4614,6 +4841,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16( %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4634,6 +4862,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f16(half* %base, i32 %vl) @@ -4654,6 +4883,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4674,6 +4904,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv4f16(half* %base, i32 %vl) @@ -4695,6 +4926,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4715,6 +4947,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv4f16(half* %base, i32 %vl) @@ -4737,6 +4970,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4757,6 +4991,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv4f16(half* %base, i32 %vl) @@ -4780,6 +5015,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4800,6 +5036,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv4f16(half* %base, i32 %vl) @@ -4824,6 +5061,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4844,6 +5082,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv2f16(half* %base, i32 %vl) @@ -4862,6 +5101,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16( %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4882,6 +5122,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv2f16(half* %base, i32 %vl) @@ -4901,6 +5142,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16( %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4921,6 +5163,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv2f16(half* %base, i32 %vl) @@ -4941,6 +5184,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -4961,6 +5205,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.nxv2f16(half* %base, i32 %vl) @@ -4982,6 +5227,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -5002,6 +5248,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.nxv2f16(half* %base, i32 %vl) @@ -5024,6 +5271,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -5044,6 +5292,7 @@ ; CHECK-NEXT: 
csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.nxv2f16(half* %base, i32 %vl) @@ -5067,6 +5316,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -5087,6 +5337,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.nxv2f16(half* %base, i32 %vl) @@ -5111,6 +5362,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl, i32 1) @@ -5131,6 +5383,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.nxv4f32(float* %base, i32 %vl) @@ -5149,6 +5402,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32( %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -5169,6 +5423,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.nxv4f32(float* %base, i32 %vl) @@ -5188,6 +5443,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32( %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) @@ -5208,6 +5464,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.nxv4f32(float* %base, i32 %vl) @@ -5228,6 +5485,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl, i32 1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vlseg2e16ff.v v0, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl) @@ 
-29,6 +30,7 @@ ; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -43,6 +45,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl) @@ -57,6 +60,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -69,6 +73,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v0, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl) @@ -82,6 +87,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl) @@ -31,6 +32,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -51,6 +53,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i64 %vl) @@ -69,6 +72,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -89,6 +93,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i32(i32* %base, i64 %vl) @@ -108,6 +113,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32( %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -128,6 +134,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 
killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i32(i32* %base, i64 %vl) @@ -148,6 +155,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -168,6 +176,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i64 %vl) @@ -186,6 +195,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -206,6 +216,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv16i8(i8* %base, i64 %vl) @@ -225,6 +236,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8( %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -245,6 +257,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv16i8(i8* %base, i64 %vl) @@ -265,6 +278,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -285,6 +299,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* %base, i64 %vl) @@ -303,6 +318,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1) @@ -323,6 +339,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i64(i64* %base, i64 %vl) @@ -342,6 +359,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64( %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) @@ -362,6 +380,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i64(i64* %base, i64 %vl) @@ -382,6 +401,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64( %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) @@ -402,6 +422,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i64(i64* %base, i64 %vl) @@ -423,6 +444,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) @@ -443,6 +465,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i64(i64* %base, i64 %vl) @@ -465,6 +488,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) @@ -485,6 +509,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i64(i64* %base, i64 %vl) @@ -508,6 +533,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) @@ -528,6 +554,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i64(i64* %base, i64 %vl) @@ -552,6 +579,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) @@ -572,6 +600,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i32(i32* %base, i64 %vl) @@ -590,6 +619,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -610,6 +640,7 @@ ; 
CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i32(i32* %base, i64 %vl) @@ -629,6 +660,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32( %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -649,6 +681,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i32(i32* %base, i64 %vl) @@ -669,6 +702,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -689,6 +723,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i32(i32* %base, i64 %vl) @@ -710,6 +745,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -730,6 +766,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i32(i32* %base, i64 %vl) @@ -752,6 +789,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -772,6 +810,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i32(i32* %base, i64 %vl) @@ -795,6 +834,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -815,6 +855,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i32(i32* %base, i64 %vl) @@ -839,6 +880,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} 
@llvm.riscv.vlseg8ff.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -859,6 +901,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i64 %vl) @@ -877,6 +920,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -897,6 +941,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8i16(i16* %base, i64 %vl) @@ -916,6 +961,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16( %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -936,6 +982,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i16(i16* %base, i64 %vl) @@ -956,6 +1003,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -976,6 +1024,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i8(i8* %base, i64 %vl) @@ -994,6 +1043,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -1014,6 +1064,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i8(i8* %base, i64 %vl) @@ -1033,6 +1084,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8( %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -1053,6 +1105,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i8(i8* %base, i64 %vl) @@ -1073,6 +1126,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} 
@llvm.riscv.vlseg4ff.mask.nxv4i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -1093,6 +1147,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i8(i8* %base, i64 %vl) @@ -1114,6 +1169,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -1134,6 +1190,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i8(i8* %base, i64 %vl) @@ -1156,6 +1213,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -1176,6 +1234,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i8(i8* %base, i64 %vl) @@ -1199,6 +1258,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -1219,6 +1279,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i8(i8* %base, i64 %vl) @@ -1243,6 +1304,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -1263,6 +1325,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i16(i16* %base, i64 %vl) @@ -1281,6 +1344,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -1301,6 +1365,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i16(i16* %base, i64 %vl) @@ -1320,6 +1385,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 
killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16( %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -1340,6 +1406,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i16(i16* %base, i64 %vl) @@ -1360,6 +1427,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -1380,6 +1448,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i16(i16* %base, i64 %vl) @@ -1401,6 +1470,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -1421,6 +1491,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i16(i16* %base, i64 %vl) @@ -1443,6 +1514,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -1463,6 +1535,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i16(i16* %base, i64 %vl) @@ -1486,6 +1559,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -1506,6 +1580,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i16(i16* %base, i64 %vl) @@ -1530,6 +1605,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -1550,6 +1626,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} 
@llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i64 %vl) @@ -1568,6 +1645,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -1588,6 +1666,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i32(i32* %base, i64 %vl) @@ -1607,6 +1686,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32( %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -1627,6 +1707,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i32(i32* %base, i64 %vl) @@ -1647,6 +1728,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -1667,6 +1749,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i32(i32* %base, i64 %vl) @@ -1688,6 +1771,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -1708,6 +1792,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i32(i32* %base, i64 %vl) @@ -1730,6 +1815,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -1750,6 +1836,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i32(i32* %base, i64 %vl) @@ -1773,6 +1860,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -1793,6 +1881,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i32(i32* %base, i64 %vl) @@ -1817,6 +1906,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -1837,6 +1927,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i64 %vl) @@ -1855,6 +1946,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -1875,6 +1967,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8i8(i8* %base, i64 %vl) @@ -1894,6 +1987,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8( %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -1914,6 +2008,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8i8(i8* %base, i64 %vl) @@ -1934,6 +2029,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -1954,6 +2050,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv8i8(i8* %base, i64 %vl) @@ -1975,6 +2072,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -1995,6 +2093,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv8i8(i8* %base, i64 %vl) @@ -2017,6 +2116,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2037,6 +2137,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv8i8(i8* %base, i64 %vl) @@ -2060,6 +2161,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2080,6 +2182,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv8i8(i8* %base, i64 %vl) @@ -2104,6 +2207,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2124,6 +2228,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* %base, i64 %vl) @@ -2142,6 +2247,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1) @@ -2162,6 +2268,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i64 %vl) @@ -2180,6 +2287,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -2200,6 +2308,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4i16(i16* %base, i64 %vl) @@ -2219,6 +2328,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16( %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -2239,6 +2349,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4i16(i16* %base, i64 %vl) @@ -2259,6 +2370,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -2279,6 +2391,7 @@ ; CHECK-NEXT: csrr a0, vl 
; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4i16(i16* %base, i64 %vl) @@ -2300,6 +2413,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -2320,6 +2434,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4i16(i16* %base, i64 %vl) @@ -2342,6 +2457,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -2362,6 +2478,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4i16(i16* %base, i64 %vl) @@ -2385,6 +2502,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -2405,6 +2523,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4i16(i16* %base, i64 %vl) @@ -2429,6 +2548,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -2449,6 +2569,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1i8(i8* %base, i64 %vl) @@ -2467,6 +2588,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2487,6 +2609,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1i8(i8* %base, i64 %vl) @@ -2506,6 +2629,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} 
@llvm.riscv.vlseg3ff.mask.nxv1i8( %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2526,6 +2650,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1i8(i8* %base, i64 %vl) @@ -2546,6 +2671,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2566,6 +2692,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1i8(i8* %base, i64 %vl) @@ -2587,6 +2714,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2607,6 +2735,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1i8(i8* %base, i64 %vl) @@ -2629,6 +2758,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2649,6 +2779,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1i8(i8* %base, i64 %vl) @@ -2672,6 +2803,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2692,6 +2824,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1i8(i8* %base, i64 %vl) @@ -2716,6 +2849,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2736,6 +2870,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i8(i8* %base, i64 %vl) @@ -2754,6 +2889,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # 
kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2774,6 +2910,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i8(i8* %base, i64 %vl) @@ -2793,6 +2930,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8( %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2813,6 +2951,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i8(i8* %base, i64 %vl) @@ -2833,6 +2972,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2853,6 +2993,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i8(i8* %base, i64 %vl) @@ -2874,6 +3015,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2894,6 +3036,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i8(i8* %base, i64 %vl) @@ -2916,6 +3059,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2936,6 +3080,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i8(i8* %base, i64 %vl) @@ -2959,6 +3104,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -2979,6 +3125,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i8(i8* %base, i64 %vl) @@ -3003,6 +3150,7 @@ ; 
CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -3023,6 +3171,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8i32(i32* %base, i64 %vl) @@ -3041,6 +3190,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32( %val, %val, i32* %base, %mask, i64 %vl, i64 1) @@ -3061,6 +3211,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i64 %vl) @@ -3079,6 +3230,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl, i64 1) @@ -3099,6 +3251,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i16(i16* %base, i64 %vl) @@ -3117,6 +3270,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16( %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -3137,6 +3291,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i16(i16* %base, i64 %vl) @@ -3156,6 +3311,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16( %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -3176,6 +3332,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i16(i16* %base, i64 %vl) @@ -3196,6 +3353,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -3216,6 +3374,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2i16(i16* %base, i64 %vl) @@ -3237,6 +3396,7 @@ ; CHECK-NEXT: csrr 
a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -3257,6 +3417,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2i16(i16* %base, i64 %vl) @@ -3279,6 +3440,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -3299,6 +3461,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2i16(i16* %base, i64 %vl) @@ -3322,6 +3485,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -3342,6 +3506,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2i16(i16* %base, i64 %vl) @@ -3366,6 +3531,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl, i64 1) @@ -3386,6 +3552,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* %base, i64 %vl) @@ -3404,6 +3571,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl, i64 1) @@ -3424,6 +3592,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2i64(i64* %base, i64 %vl) @@ -3443,6 +3612,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64( %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) @@ -3463,6 +3633,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2i64(i64* %base, i64 %vl) @@ -3483,6 +3654,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64( %val, %val, %val, %val, i64* %base, %mask, i64 %vl, i64 1) @@ -3503,6 +3675,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16f16(half* %base, i64 %vl) @@ -3521,6 +3694,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16( %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -3541,6 +3715,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f64(double* %base, i64 %vl) @@ -3559,6 +3734,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64( %val, %val, double* %base, %mask, i64 %vl, i64 1) @@ -3579,6 +3755,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f64(double* %base, i64 %vl) @@ -3597,6 +3774,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64( %val, %val, double* %base, %mask, i64 %vl, i64 1) @@ -3617,6 +3795,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f64(double* %base, i64 %vl) @@ -3636,6 +3815,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64( %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) @@ -3656,6 +3836,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f64(double* %base, i64 %vl) @@ -3676,6 +3857,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64( %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) @@ -3696,6 +3878,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f64(double* %base, i64 %vl) @@ -3717,6 +3900,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) @@ -3737,6 +3921,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f64(double* %base, i64 %vl) @@ -3759,6 +3944,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) @@ -3779,6 +3965,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f64(double* %base, i64 %vl) @@ -3802,6 +3989,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) @@ -3822,6 +4010,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f64(double* %base, i64 %vl) @@ -3846,6 +4035,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) @@ -3866,6 +4056,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f32(float* %base, i64 %vl) @@ -3884,6 +4075,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32( %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -3904,6 +4096,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f32(float* %base, i64 %vl) @@ -3923,6 +4116,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32( %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) 
@@ -3943,6 +4137,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f32(float* %base, i64 %vl) @@ -3963,6 +4158,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -3983,6 +4179,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f32(float* %base, i64 %vl) @@ -4004,6 +4201,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4024,6 +4222,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f32(float* %base, i64 %vl) @@ -4046,6 +4245,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4066,6 +4266,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f32(float* %base, i64 %vl) @@ -4089,6 +4290,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4109,6 +4311,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f32(float* %base, i64 %vl) @@ -4133,6 +4336,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4153,6 +4357,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f16(half* %base, i64 %vl) @@ -4171,6 +4376,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16( %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -4191,6 +4397,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f16(half* %base, i64 %vl) @@ -4210,6 +4417,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16( %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -4230,6 +4438,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f16(half* %base, i64 %vl) @@ -4250,6 +4459,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -4270,6 +4480,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f16(half* %base, i64 %vl) @@ -4291,6 +4502,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -4311,6 +4523,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f16(half* %base, i64 %vl) @@ -4333,6 +4546,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -4353,6 +4567,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f16(half* %base, i64 %vl) @@ -4376,6 +4591,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -4396,6 +4612,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f16(half* %base, i64 %vl) @@ -4420,6 +4637,7 @@ ; 
CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -4440,6 +4658,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv1f32(float* %base, i64 %vl) @@ -4458,6 +4677,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32( %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4478,6 +4698,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv1f32(float* %base, i64 %vl) @@ -4497,6 +4718,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32( %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4517,6 +4739,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv1f32(float* %base, i64 %vl) @@ -4537,6 +4760,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4557,6 +4781,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv1f32(float* %base, i64 %vl) @@ -4578,6 +4803,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4598,6 +4824,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv1f32(float* %base, i64 %vl) @@ -4620,6 +4847,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4640,6 +4868,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail 
call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv1f32(float* %base, i64 %vl) @@ -4663,6 +4892,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4683,6 +4913,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv1f32(float* %base, i64 %vl) @@ -4707,6 +4938,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4727,6 +4959,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8f16(half* %base, i64 %vl) @@ -4745,6 +4978,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16( %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -4765,6 +4999,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv8f16(half* %base, i64 %vl) @@ -4784,6 +5019,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16( %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -4804,6 +5040,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv8f16(half* %base, i64 %vl) @@ -4824,6 +5061,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -4844,6 +5082,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv8f32(float* %base, i64 %vl) @@ -4862,6 +5101,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32( %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -4882,6 +5122,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; 
CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f64(double* %base, i64 %vl) @@ -4900,6 +5141,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64( %val, %val, double* %base, %mask, i64 %vl, i64 1) @@ -4920,6 +5162,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f64(double* %base, i64 %vl) @@ -4939,6 +5182,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64( %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) @@ -4959,6 +5203,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f64(double* %base, i64 %vl) @@ -4979,6 +5224,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64( %val, %val, %val, %val, double* %base, %mask, i64 %vl, i64 1) @@ -4999,6 +5245,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f16(half* %base, i64 %vl) @@ -5017,6 +5264,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16( %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5037,6 +5285,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4f16(half* %base, i64 %vl) @@ -5056,6 +5305,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16( %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5076,6 +5326,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f16(half* %base, i64 %vl) @@ -5096,6 +5347,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5116,6 +5368,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 
0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv4f16(half* %base, i64 %vl) @@ -5137,6 +5390,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5157,6 +5411,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv4f16(half* %base, i64 %vl) @@ -5179,6 +5434,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5199,6 +5455,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv4f16(half* %base, i64 %vl) @@ -5222,6 +5479,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5242,6 +5500,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv4f16(half* %base, i64 %vl) @@ -5266,6 +5525,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5286,6 +5546,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv2f16(half* %base, i64 %vl) @@ -5304,6 +5565,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16( %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5324,6 +5586,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv2f16(half* %base, i64 %vl) @@ -5343,6 +5606,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} 
@llvm.riscv.vlseg3ff.mask.nxv2f16( %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5363,6 +5627,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv2f16(half* %base, i64 %vl) @@ -5383,6 +5648,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5403,6 +5669,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.nxv2f16(half* %base, i64 %vl) @@ -5424,6 +5691,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5444,6 +5712,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.nxv2f16(half* %base, i64 %vl) @@ -5466,6 +5735,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5486,6 +5756,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.nxv2f16(half* %base, i64 %vl) @@ -5509,6 +5780,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5529,6 +5801,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.nxv2f16(half* %base, i64 %vl) @@ -5553,6 +5826,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl, i64 1) @@ -5573,6 +5847,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv4f32(float* %base, i64 %vl) @@ -5591,6 +5866,7 @@ ; CHECK-NEXT: csrr a0, vl ; 
CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32( %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -5611,6 +5887,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.nxv4f32(float* %base, i64 %vl) @@ -5630,6 +5907,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32( %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) @@ -5650,6 +5928,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.nxv4f32(float* %base, i64 %vl) @@ -5670,6 +5949,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl, i64 1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i32 %offset, i32 %vl) @@ -26,6 +27,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i32 %offset, i32 %vl) @@ -44,6 +46,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -59,6 +62,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -77,6 +81,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -93,6 +98,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -111,6 +117,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; 
CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -128,6 +135,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -146,6 +154,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -164,6 +173,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -182,6 +192,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -201,6 +212,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -219,6 +231,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -239,6 +252,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -257,6 +271,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -278,6 +293,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i32 %offset, i32 %vl) @@ -296,6 +312,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i32 %offset, i32 %vl) @@ -311,6 +328,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed 
$v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i32 %offset, i32 %vl) @@ -329,6 +347,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i32 %offset, i32 %vl) @@ -345,6 +364,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i32 %offset, i32 %vl) @@ -363,6 +383,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i32 %offset, i32 %vl) @@ -380,6 +401,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i32 %offset, i32 %vl) @@ -398,6 +420,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -413,6 +436,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -431,6 +455,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -447,6 +472,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -465,6 +491,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -482,6 +509,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -500,6 +528,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i32 %offset, 
i32 %vl) @@ -518,6 +547,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -536,6 +566,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -555,6 +586,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -573,6 +605,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -593,6 +626,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -611,6 +645,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -632,6 +667,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i32 %offset, i32 %vl) @@ -650,6 +686,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -665,6 +702,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -683,6 +721,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -699,6 +738,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -717,6 +757,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: 
vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -734,6 +775,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -752,6 +794,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -770,6 +813,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -788,6 +832,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -807,6 +852,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -825,6 +871,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -845,6 +892,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -863,6 +911,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -884,6 +933,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i32 %offset, i32 %vl) @@ -902,6 +952,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -917,6 +968,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -935,6 +987,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -951,6 +1004,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -969,6 +1023,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -986,6 +1041,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -1004,6 +1060,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -1022,6 +1079,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -1040,6 +1098,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -1059,6 +1118,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -1077,6 +1137,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -1097,6 +1158,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -1115,6 +1177,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -1136,6 +1199,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i32 %offset, i32 %vl) @@ -1154,6 +1218,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i32 %offset, i32 %vl) @@ -1169,6 +1234,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i32 %offset, i32 %vl) @@ -1187,6 +1253,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i32 %offset, i32 %vl) @@ -1203,6 +1270,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i32 %offset, i32 %vl) @@ -1221,6 +1289,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i32 %offset, i32 %vl) @@ -1238,6 +1307,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i32 %offset, i32 %vl) @@ -1256,6 +1326,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1271,6 +1342,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1289,6 +1361,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1305,6 +1378,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1323,6 +1397,7 
@@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1340,6 +1415,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1358,6 +1434,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1376,6 +1453,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1394,6 +1472,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1413,6 +1492,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1431,6 +1511,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1451,6 +1532,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1469,6 +1551,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1490,6 +1573,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i32 %offset, i32 %vl) @@ -1508,6 +1592,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i32 %offset, i32 %vl) @@ -1523,6 +1608,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e32.v v4, 
(a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i32 %offset, i32 %vl) @@ -1541,6 +1627,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1556,6 +1643,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1574,6 +1662,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1590,6 +1679,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1608,6 +1698,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1625,6 +1716,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1643,6 +1735,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1661,6 +1754,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1679,6 +1773,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1698,6 +1793,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1716,6 +1812,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1736,6 +1833,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1754,6 +1852,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1775,6 +1874,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i32 %offset, i32 %vl) @@ -1793,6 +1893,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -1808,6 +1909,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -1826,6 +1928,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -1842,6 +1945,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -1860,6 +1964,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -1877,6 +1982,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -1895,6 +2001,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -1913,6 +2020,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -1931,6 +2039,7 @@ ; 
CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -1950,6 +2059,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -1968,6 +2078,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -1988,6 +2099,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -2006,6 +2118,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -2027,6 +2140,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i32 %offset, i32 %vl) @@ -2045,6 +2159,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i32 %offset, i32 %vl) @@ -2060,6 +2175,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i32 %offset, i32 %vl) @@ -2078,6 +2194,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2093,6 +2210,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2111,6 +2229,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2127,6 +2246,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2145,6 +2265,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2162,6 +2283,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2180,6 +2302,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2198,6 +2321,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2216,6 +2340,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2235,6 +2360,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2253,6 +2379,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2273,6 +2400,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2291,6 +2419,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2312,6 +2441,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i32 %offset, i32 %vl) @@ -2330,6 +2460,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2345,6 +2476,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2363,6 +2495,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2379,6 +2512,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2397,6 +2531,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2414,6 +2549,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2432,6 +2568,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2450,6 +2587,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2468,6 +2606,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2487,6 +2626,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2505,6 +2645,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2525,6 +2666,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vlsseg7.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2543,6 +2685,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2564,6 +2707,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i32 %offset, i32 %vl) @@ -2582,6 +2726,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i32 %offset, i32 %vl) @@ -2597,6 +2742,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i32 %offset, i32 %vl) @@ -2615,6 +2761,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i32 %offset, i32 %vl) @@ -2631,6 +2778,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i32 %offset, i32 %vl) @@ -2649,6 +2797,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i32 %offset, i32 %vl) @@ -2666,6 +2815,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i32 %offset, i32 %vl) @@ -2684,6 +2834,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i32 %offset, i32 %vl) @@ -2699,6 +2850,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i32 %offset, i32 %vl) @@ -2717,6 +2869,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i32 
%offset, i32 %vl) @@ -2732,6 +2885,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i32 %offset, i32 %vl) @@ -2750,6 +2904,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2765,6 +2920,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2783,6 +2939,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2799,6 +2956,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2817,6 +2975,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2834,6 +2993,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2852,6 +3012,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2870,6 +3031,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2888,6 +3050,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2907,6 +3070,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2925,6 +3089,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: 
vlsseg7e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2945,6 +3110,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2963,6 +3129,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -2984,6 +3151,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i32 %offset, i32 %vl) @@ -3002,6 +3170,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3017,6 +3186,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3035,6 +3205,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3051,6 +3222,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3069,6 +3241,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3086,6 +3259,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3104,6 +3278,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3122,6 +3297,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3140,6 +3316,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3159,6 +3336,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3177,6 +3355,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3197,6 +3376,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3215,6 +3395,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3236,6 +3417,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i32 %offset, i32 %vl) @@ -3254,6 +3436,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3269,6 +3452,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3287,6 +3471,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3303,6 +3488,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3321,6 +3507,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3338,6 +3525,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3356,6 +3544,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3374,6 +3563,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3392,6 +3582,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3411,6 +3602,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3429,6 +3621,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3449,6 +3642,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3467,6 +3661,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3488,6 +3683,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i32 %offset, i32 %vl) @@ -3506,6 +3702,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3521,6 +3718,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3539,6 +3737,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3555,6 +3754,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3573,6 +3773,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3590,6 +3791,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3608,6 +3810,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3626,6 +3829,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3644,6 +3848,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3663,6 +3868,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3681,6 +3887,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3701,6 +3908,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3719,6 +3927,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} 
@llvm.riscv.vlsseg8.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3740,6 +3949,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i32 %offset, i32 %vl) @@ -3758,6 +3968,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i32 %offset, i32 %vl) @@ -3773,6 +3984,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i32 %offset, i32 %vl) @@ -3791,6 +4003,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i32 %offset, i32 %vl) @@ -3807,6 +4020,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i32 %offset, i32 %vl) @@ -3825,6 +4039,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i32 %offset, i32 %vl) @@ -3842,6 +4057,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i32 %offset, i32 %vl) @@ -3860,6 +4076,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i32 %offset, i32 %vl) @@ -3875,6 +4092,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i32 %offset, i32 %vl) @@ -3893,6 +4111,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i32 %offset, i32 %vl) @@ -3908,6 +4127,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i32 %offset, i32 %vl) @@ 
-3926,6 +4146,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i32 %offset, i32 %vl) @@ -3942,6 +4163,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i32 %offset, i32 %vl) @@ -3960,6 +4182,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i32 %offset, i32 %vl) @@ -3977,6 +4200,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i32 %offset, i32 %vl) @@ -3995,6 +4219,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4010,6 +4235,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4028,6 +4254,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4044,6 +4271,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4062,6 +4290,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4079,6 +4308,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4097,6 +4327,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4115,6 +4346,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t 
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4133,6 +4365,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4152,6 +4385,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4170,6 +4404,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4190,6 +4425,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4208,6 +4444,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4229,6 +4466,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i32 %offset, i32 %vl) @@ -4247,6 +4485,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4262,6 +4501,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4280,6 +4520,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4296,6 +4537,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4314,6 +4556,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4331,6 +4574,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4349,6 +4593,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4367,6 +4612,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4385,6 +4631,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4404,6 +4651,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4422,6 +4670,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4442,6 +4691,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4460,6 +4710,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4481,6 +4732,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i32 %offset, i32 %vl) @@ -4499,6 +4751,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i32 %offset, i32 %vl) @@ -4514,6 +4767,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i32 %offset, i32 %vl) @@ -4532,6 +4786,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i32 %offset, i32 %vl) @@ -4548,6 +4803,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i32 %offset, i32 %vl) @@ -4566,6 +4822,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i32 %offset, i32 %vl) @@ -4583,6 +4840,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i32 %offset, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl) @@ -26,6 +27,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl) @@ -44,6 +46,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i64 %offset, i64 %vl) @@ -59,6 +62,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i64 %offset, i64 %vl) @@ -77,6 +81,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i64 %offset, i64 %vl) @@ -93,6 +98,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i64 %offset, i64 %vl) @@ -111,6 +117,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; 
CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl) @@ -128,6 +135,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl) @@ -146,6 +154,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl) @@ -161,6 +170,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl) @@ -179,6 +189,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl) @@ -195,6 +206,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl) @@ -213,6 +225,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl) @@ -230,6 +243,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl) @@ -248,6 +262,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -263,6 +278,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -281,6 +297,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -297,6 +314,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -315,6 +333,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -332,6 +351,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -350,6 +370,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -368,6 +389,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -386,6 +408,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -405,6 +428,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -423,6 +447,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -443,6 +468,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -461,6 +487,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -482,6 +509,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl) @@ -500,6 +528,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -515,6 +544,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -533,6 +563,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -549,6 +580,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -567,6 +599,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -584,6 +617,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -602,6 +636,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -620,6 +655,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -638,6 +674,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -657,6 +694,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -675,6 +713,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -695,6 +734,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -713,6 +753,7 @@ ; CHECK-NEXT: vsetvli zero, a2, 
e32, mf2, ta, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -734,6 +775,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl) @@ -752,6 +794,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl) @@ -767,6 +810,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl) @@ -785,6 +829,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl) @@ -801,6 +846,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl) @@ -819,6 +865,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl) @@ -836,6 +883,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl) @@ -854,6 +902,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -869,6 +918,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -887,6 +937,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -903,6 +954,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -921,6 +973,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -938,6 +991,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -956,6 +1010,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -974,6 +1029,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -992,6 +1048,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -1011,6 +1068,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -1029,6 +1087,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -1049,6 +1108,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -1067,6 +1127,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -1088,6 +1149,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl) @@ -1106,6 +1168,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call 
{,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1121,6 +1184,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1139,6 +1203,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1155,6 +1220,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1173,6 +1239,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1190,6 +1257,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1208,6 +1276,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1226,6 +1295,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1244,6 +1314,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1263,6 +1334,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1281,6 +1353,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1301,6 +1374,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1319,6 +1393,7 @@ ; 
CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1340,6 +1415,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl) @@ -1358,6 +1434,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1373,6 +1450,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1391,6 +1469,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1407,6 +1486,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1425,6 +1505,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1442,6 +1523,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1460,6 +1542,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1478,6 +1561,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1496,6 +1580,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1515,6 +1600,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: 
def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1533,6 +1619,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1553,6 +1640,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1571,6 +1659,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1592,6 +1681,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl) @@ -1610,6 +1700,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1625,6 +1716,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1643,6 +1735,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1659,6 +1752,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1677,6 +1771,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1694,6 +1789,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1712,6 +1808,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1730,6 +1827,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1748,6 +1846,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1767,6 +1866,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1785,6 +1885,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1805,6 +1906,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1823,6 +1925,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1844,6 +1947,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl) @@ -1862,6 +1966,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl) @@ -1877,6 +1982,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl) @@ -1895,6 +2001,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -1910,6 +2017,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl) 
@@ -1928,6 +2036,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -1944,6 +2053,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -1962,6 +2072,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -1979,6 +2090,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -1997,6 +2109,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -2015,6 +2128,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -2033,6 +2147,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -2052,6 +2167,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -2070,6 +2186,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -2090,6 +2207,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -2108,6 +2226,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -2129,6 +2248,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; 
CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl) @@ -2147,6 +2267,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2162,6 +2283,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2180,6 +2302,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2196,6 +2319,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2214,6 +2338,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2231,6 +2356,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2249,6 +2375,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2267,6 +2394,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2285,6 +2413,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2304,6 +2433,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2322,6 +2452,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2342,6 +2473,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2360,6 +2492,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2381,6 +2514,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl) @@ -2399,6 +2533,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2414,6 +2549,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2432,6 +2568,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2448,6 +2585,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2466,6 +2604,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2483,6 +2622,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2501,6 +2641,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2519,6 +2660,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ 
-2537,6 +2679,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2556,6 +2699,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2574,6 +2718,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2594,6 +2739,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2612,6 +2758,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2633,6 +2780,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl) @@ -2651,6 +2799,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl) @@ -2666,6 +2815,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl) @@ -2684,6 +2834,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl) @@ -2699,6 +2850,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl) @@ -2717,6 +2869,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2732,6 +2885,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v 
v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2750,6 +2904,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2766,6 +2921,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2784,6 +2940,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2801,6 +2958,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2819,6 +2977,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2837,6 +2996,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2855,6 +3015,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2874,6 +3035,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2892,6 +3054,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2912,6 +3075,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2930,6 +3094,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2951,6 +3116,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl) @@ -2969,6 +3135,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl) @@ -2984,6 +3151,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl) @@ -3002,6 +3170,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl) @@ -3018,6 +3187,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl) @@ -3036,6 +3206,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl) @@ -3053,6 +3224,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl) @@ -3071,6 +3243,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl) @@ -3086,6 +3259,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl) @@ -3104,6 +3278,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl) @@ -3119,6 +3294,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl) @@ -3137,6 +3313,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3152,6 +3329,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3170,6 +3348,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3186,6 +3365,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3204,6 +3384,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3221,6 +3402,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3239,6 +3421,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3257,6 +3440,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3275,6 +3459,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3294,6 +3479,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3312,6 +3498,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* 
%base, i64 %offset, i64 %vl) @@ -3332,6 +3519,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3350,6 +3538,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3371,6 +3560,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl) @@ -3389,6 +3579,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3404,6 +3595,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3422,6 +3614,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3438,6 +3631,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3456,6 +3650,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3473,6 +3668,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3491,6 +3687,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3509,6 +3706,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3527,6 +3725,7 @@ ; CHECK-NEXT: vsetvli zero, 
a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3546,6 +3745,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3564,6 +3764,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3584,6 +3785,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3602,6 +3804,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3623,6 +3826,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl) @@ -3641,6 +3845,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3656,6 +3861,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3674,6 +3880,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3690,6 +3897,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3708,6 +3916,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3725,6 +3934,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t 
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3743,6 +3953,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3761,6 +3972,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3779,6 +3991,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3798,6 +4011,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3816,6 +4030,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3836,6 +4051,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3854,6 +4070,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3875,6 +4092,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl) @@ -3893,6 +4111,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -3908,6 +4127,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -3926,6 +4146,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -3942,6 +4163,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -3960,6 +4182,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -3977,6 +4200,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -3995,6 +4219,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -4013,6 +4238,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -4031,6 +4257,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -4050,6 +4277,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -4068,6 +4296,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -4088,6 +4317,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -4106,6 +4336,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -4127,6 +4358,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl) @@ -4145,6 +4377,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl) @@ -4160,6 +4393,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl) @@ -4178,6 +4412,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl) @@ -4194,6 +4429,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl) @@ -4212,6 +4448,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl) @@ -4229,6 +4466,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl) @@ -4247,6 +4485,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl) @@ -4262,6 +4501,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl) @@ -4280,6 +4520,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl) @@ -4295,6 +4536,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl) @@ -4313,6 +4555,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl) @@ -4329,6 +4572,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl) @@ -4347,6 +4591,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl) @@ -4364,6 +4609,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl) @@ -4382,6 +4628,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4397,6 +4644,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4415,6 +4663,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4431,6 +4680,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4449,6 +4699,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4466,6 +4717,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4484,6 +4736,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4502,6 +4755,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ 
-4520,6 +4774,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4539,6 +4794,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4557,6 +4813,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4577,6 +4834,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4595,6 +4853,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4616,6 +4875,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl) @@ -4634,6 +4894,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4649,6 +4910,7 @@ ; CHECK-NEXT: vmv1r.v v8, v7 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4667,6 +4929,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4683,6 +4946,7 @@ ; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4701,6 +4965,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4718,6 +4983,7 @@ ; CHECK-NEXT: vmv1r.v v10, v7 ; 
CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4736,6 +5002,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4754,6 +5021,7 @@ ; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4772,6 +5040,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4791,6 +5060,7 @@ ; CHECK-NEXT: vmv1r.v v12, v7 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4809,6 +5079,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4829,6 +5100,7 @@ ; CHECK-NEXT: vmv1r.v v13, v7 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4847,6 +5119,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4868,6 +5141,7 @@ ; CHECK-NEXT: vmv1r.v v14, v7 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl) @@ -4886,6 +5160,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl) @@ -4901,6 +5176,7 @@ ; CHECK-NEXT: vmv2r.v v8, v6 ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl) @@ -4919,6 +5195,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; 
CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl) @@ -4935,6 +5212,7 @@ ; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl) @@ -4953,6 +5231,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl) @@ -4970,6 +5249,7 @@ ; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i32 %vl) @@ -25,6 +26,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -41,6 +43,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16* %base, %index, i32 %vl) @@ -55,6 +58,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -71,6 +75,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16* %base, %index, i32 %vl) @@ -85,6 +90,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -101,6 +107,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 
= tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -115,6 +122,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -131,6 +139,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -145,6 +154,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -161,6 +171,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -175,6 +186,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -191,6 +203,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -207,6 +220,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -223,6 +237,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -239,6 +254,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -255,6 +271,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -271,6 +288,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -287,6 +305,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -304,6 +323,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -320,6 +340,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -337,6 +358,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -353,6 +375,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -370,6 +393,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -386,6 +410,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -404,6 +429,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -420,6 +446,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -438,6 +465,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -454,6 +482,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -472,6 +501,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -488,6 +518,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -507,6 +538,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -523,6 +555,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -542,6 +575,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -558,6 +592,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -577,6 +612,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -593,6 +629,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -613,6 +650,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -629,6 +667,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -649,6 +688,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -665,6 +705,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -685,6 +726,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -701,6 +743,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) @@ -722,6 +765,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -738,6 +782,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) @@ -759,6 +804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -775,6 +821,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) @@ -796,6 +843,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -812,6 +860,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -826,6 +875,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -842,6 +892,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -856,6 +907,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -872,6 +924,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -886,6 +939,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -902,6 +956,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -917,6 +972,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -933,6 +989,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -949,6 +1006,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -965,6 +1023,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -980,6 +1039,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -996,6 +1056,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -1013,6 +1074,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -1029,6 +1091,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -1046,6 +1109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -1062,6 +1126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -1078,6 +1143,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -1094,6 +1160,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1108,6 +1175,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1124,6 +1192,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1138,6 +1207,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1154,6 +1224,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1168,6 +1239,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1184,6 +1256,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1200,6 +1273,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call 
{,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1216,6 +1290,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1232,6 +1307,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1248,6 +1324,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1264,6 +1341,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1280,6 +1358,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1297,6 +1376,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1313,6 +1393,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1330,6 +1411,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1346,6 +1428,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1363,6 +1446,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1379,6 +1463,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1397,6 +1482,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1413,6 +1499,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1431,6 +1518,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1447,6 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1465,6 +1554,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1481,6 +1571,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1500,6 +1591,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1516,6 +1608,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1535,6 +1628,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1551,6 +1645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1570,6 +1665,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1586,6 +1682,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1606,6 +1703,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1622,6 +1720,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1642,6 +1741,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1658,6 +1758,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1678,6 +1779,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1694,6 +1796,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1715,6 +1818,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1731,6 +1835,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1752,6 +1857,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1768,6 +1874,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ 
-1789,6 +1896,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -1805,6 +1913,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -1819,6 +1928,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1835,6 +1945,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -1849,6 +1960,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1865,6 +1977,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -1879,6 +1992,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1895,6 +2009,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -1911,6 +2026,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1927,6 +2043,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -1943,6 +2060,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1959,6 +2077,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -1974,6 +2093,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -1990,6 +2110,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2007,6 +2128,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2023,6 +2145,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2040,6 +2163,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2056,6 +2180,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2073,6 +2198,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2089,6 +2215,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2107,6 +2234,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2123,6 +2251,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2141,6 +2270,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, 
v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2157,6 +2287,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2175,6 +2306,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2191,6 +2323,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2210,6 +2343,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2226,6 +2360,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2245,6 +2380,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2261,6 +2397,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2280,6 +2417,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2296,6 +2434,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2316,6 +2455,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2332,6 +2472,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: 
vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2352,6 +2493,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2368,6 +2510,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2388,6 +2531,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2404,6 +2548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2425,6 +2570,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2441,6 +2587,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2462,6 +2609,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2478,6 +2626,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2499,6 +2648,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -2515,6 +2665,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2529,6 +2680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, 
e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2545,6 +2697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2559,6 +2712,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2575,6 +2729,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2589,6 +2744,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2605,6 +2761,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2621,6 +2778,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2637,6 +2795,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2653,6 +2812,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2669,6 +2829,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2685,6 +2846,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2701,6 +2863,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, 
(a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2718,6 +2881,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2734,6 +2898,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2751,6 +2916,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2767,6 +2933,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2784,6 +2951,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2800,6 +2968,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2818,6 +2987,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2834,6 +3004,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2852,6 +3023,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2868,6 +3040,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2886,6 +3059,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2902,6 +3076,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -2921,6 +3096,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2937,6 +3113,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -2956,6 +3133,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -2972,6 +3150,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -2991,6 +3170,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3007,6 +3187,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -3027,6 +3208,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3043,6 +3225,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -3063,6 +3246,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3079,6 +3263,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -3099,6 +3284,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3115,6 +3301,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) @@ -3136,6 +3323,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3152,6 +3340,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) @@ -3173,6 +3362,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3189,6 +3379,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) @@ -3210,6 +3401,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -3226,6 +3418,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3240,6 +3433,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3256,6 +3450,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3270,6 +3465,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, 
v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3286,6 +3482,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3300,6 +3497,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3316,6 +3514,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3332,6 +3531,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3348,6 +3548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3364,6 +3565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3380,6 +3582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3395,6 +3598,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3411,6 +3615,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3428,6 +3633,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3444,6 +3650,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; 
CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3461,6 +3668,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3477,6 +3685,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3494,6 +3703,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -3510,6 +3720,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3524,6 +3735,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3540,6 +3752,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3554,6 +3767,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3570,6 +3784,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3584,6 +3799,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3600,6 +3816,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3615,6 +3832,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3631,6 +3849,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3647,6 +3866,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3663,6 +3883,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3678,6 +3899,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3694,6 +3916,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3711,6 +3934,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3727,6 +3951,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3744,6 +3969,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3760,6 +3986,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3776,6 +4003,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3792,6 +4020,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3810,6 +4039,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3826,6 +4056,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3844,6 +4075,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3860,6 +4092,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3877,6 +4110,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3893,6 +4127,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3912,6 +4147,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3928,6 +4164,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3947,6 +4184,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3963,6 +4201,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3982,6 +4221,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32( %val, 
%val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -3998,6 +4238,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4018,6 +4259,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4034,6 +4276,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4054,6 +4297,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4070,6 +4314,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4090,6 +4335,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4106,6 +4352,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4127,6 +4374,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4143,6 +4391,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4164,6 +4413,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4180,6 +4430,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4201,6 +4452,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4217,6 +4469,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32* %base, %index, i32 %vl) @@ -4231,6 +4484,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -4247,6 +4501,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32* %base, %index, i32 %vl) @@ -4261,6 +4516,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -4277,6 +4533,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32* %base, %index, i32 %vl) @@ -4291,6 +4548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -4307,6 +4565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4321,6 +4580,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4337,6 +4597,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4351,6 +4612,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4367,6 +4629,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4381,6 +4644,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4397,6 +4661,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4413,6 +4678,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4429,6 +4695,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4445,6 +4712,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4461,6 +4729,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4476,6 +4745,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4492,6 +4762,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4509,6 +4780,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4525,6 +4797,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4542,6 +4815,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4558,6 +4832,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4575,6 +4850,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4591,6 +4867,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4609,6 +4886,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4625,6 +4903,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4643,6 +4922,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4659,6 +4939,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4677,6 +4958,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4693,6 +4975,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4712,6 +4995,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4728,6 +5012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; 
CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4747,6 +5032,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4763,6 +5049,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4782,6 +5069,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4798,6 +5086,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4818,6 +5107,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4834,6 +5124,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4854,6 +5145,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4870,6 +5162,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -4890,6 +5183,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4906,6 +5200,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) @@ -4927,6 +5222,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, 
(a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4943,6 +5239,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) @@ -4964,6 +5261,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -4980,6 +5278,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) @@ -5001,6 +5300,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5017,6 +5317,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5031,6 +5332,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5047,6 +5349,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5061,6 +5364,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5077,6 +5381,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5091,6 +5396,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5107,6 +5413,7 @@ ; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5123,6 +5430,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5139,6 +5447,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5155,6 +5464,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5171,6 +5481,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5187,6 +5498,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5203,6 +5515,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5220,6 +5533,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5236,6 +5550,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5253,6 +5568,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5269,6 +5585,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5286,6 +5603,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 
; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5302,6 +5620,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5320,6 +5639,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5336,6 +5656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5354,6 +5675,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5370,6 +5692,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5388,6 +5711,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5404,6 +5728,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5423,6 +5748,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5439,6 +5765,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5458,6 +5785,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5474,6 +5802,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5493,6 +5822,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5509,6 +5839,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5529,6 +5860,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5545,6 +5877,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5565,6 +5898,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5581,6 +5915,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5601,6 +5936,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5617,6 +5953,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) @@ -5638,6 +5975,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5654,6 +5992,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) @@ -5675,6 +6014,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5691,6 +6031,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) @@ -5712,6 +6053,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -5728,6 +6070,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8* %base, %index, i32 %vl) @@ -5742,6 +6085,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5758,6 +6102,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8* %base, %index, i32 %vl) @@ -5772,6 +6117,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5788,6 +6134,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -5802,6 +6149,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5818,6 +6166,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -5832,6 +6181,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5848,6 +6198,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -5862,6 +6213,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5878,6 +6230,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -5894,6 +6247,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5910,6 +6264,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -5926,6 +6281,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5942,6 +6298,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -5958,6 +6315,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -5974,6 +6332,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -5991,6 +6350,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6007,6 +6367,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -6024,6 +6385,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8( 
%val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6040,6 +6402,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -6057,6 +6420,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6073,6 +6437,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -6091,6 +6456,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6107,6 +6473,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -6125,6 +6492,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6141,6 +6509,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -6159,6 +6528,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6175,6 +6545,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -6194,6 +6565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6210,6 +6582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -6229,6 +6602,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6245,6 +6619,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -6264,6 +6639,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6280,6 +6656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -6300,6 +6677,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6316,6 +6694,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -6336,6 +6715,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6352,6 +6732,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -6372,6 +6753,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6388,6 +6770,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) @@ -6409,6 +6792,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, 
%mask, i32 %vl, i32 1) @@ -6425,6 +6809,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) @@ -6446,6 +6831,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6462,6 +6848,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) @@ -6483,6 +6870,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl, i32 1) @@ -6499,6 +6887,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -6513,6 +6902,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6529,6 +6919,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -6543,6 +6934,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6559,6 +6951,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -6573,6 +6966,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6589,6 +6983,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ 
-6605,6 +7000,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6621,6 +7017,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -6637,6 +7034,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6653,6 +7051,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -6669,6 +7068,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6685,6 +7085,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -6702,6 +7103,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6718,6 +7120,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -6735,6 +7138,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6751,6 +7155,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -6768,6 +7173,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6784,6 +7190,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: 
vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -6802,6 +7209,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6818,6 +7226,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -6836,6 +7245,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6852,6 +7262,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -6870,6 +7281,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6886,6 +7298,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -6905,6 +7318,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6921,6 +7335,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -6940,6 +7355,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6956,6 +7372,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -6975,6 +7392,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: 
vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -6991,6 +7409,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -7011,6 +7430,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7027,6 +7447,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -7047,6 +7468,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7063,6 +7485,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -7083,6 +7506,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7099,6 +7523,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) @@ -7120,6 +7545,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7136,6 +7562,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) @@ -7157,6 +7584,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7173,6 +7601,7 @@ ; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) @@ -7194,6 +7623,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl, i32 1) @@ -7210,6 +7640,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7224,6 +7655,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7240,6 +7672,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7254,6 +7687,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7270,6 +7704,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7284,6 +7719,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7300,6 +7736,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7316,6 +7753,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7332,6 +7770,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7348,6 +7787,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; 
CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7364,6 +7804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7380,6 +7821,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7396,6 +7838,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7413,6 +7856,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7429,6 +7873,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7446,6 +7891,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7462,6 +7908,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7479,6 +7926,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl, i32 1) @@ -7495,6 +7943,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half* %base, %index, i32 %vl) @@ -7509,6 +7958,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -7525,6 +7975,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; 
CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half* %base, %index, i32 %vl) @@ -7539,6 +7990,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -7555,6 +8007,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half* %base, %index, i32 %vl) @@ -7569,6 +8022,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -7585,6 +8039,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double* %base, %index, i32 %vl) @@ -7599,6 +8054,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7615,6 +8071,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double* %base, %index, i32 %vl) @@ -7629,6 +8086,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7645,6 +8103,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double* %base, %index, i32 %vl) @@ -7659,6 +8118,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7675,6 +8135,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7689,6 +8150,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, 
v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7705,6 +8167,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7719,6 +8182,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7735,6 +8199,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7749,6 +8214,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7765,6 +8231,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7781,6 +8248,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7797,6 +8265,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7813,6 +8282,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7829,6 +8299,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7845,6 +8316,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7861,6 +8333,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7878,6 +8351,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7894,6 +8368,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7911,6 +8386,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7927,6 +8403,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7944,6 +8421,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7960,6 +8438,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7978,6 +8457,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -7994,6 +8474,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8012,6 +8493,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8028,6 +8510,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8046,6 +8529,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 
= tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8062,6 +8546,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8081,6 +8566,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8097,6 +8583,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8116,6 +8603,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8132,6 +8620,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8151,6 +8640,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8167,6 +8657,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8187,6 +8678,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8203,6 +8695,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8223,6 +8716,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8239,6 +8733,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8259,6 +8754,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8275,6 +8771,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8296,6 +8793,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8312,6 +8810,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8333,6 +8832,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8349,6 +8849,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8370,6 +8871,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -8386,6 +8888,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8400,6 +8903,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8416,6 +8920,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8430,6 +8935,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v 
v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8446,6 +8952,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8460,6 +8967,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8476,6 +8984,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8492,6 +9001,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8508,6 +9018,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8524,6 +9035,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8540,6 +9052,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8556,6 +9069,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8572,6 +9086,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8589,6 +9104,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8605,6 +9121,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8622,6 +9139,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8638,6 +9156,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8655,6 +9174,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8671,6 +9191,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8689,6 +9210,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8705,6 +9227,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8723,6 +9246,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8739,6 +9263,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8757,6 +9282,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8773,6 +9299,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8792,6 +9319,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8808,6 +9336,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8827,6 +9356,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8843,6 +9373,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8862,6 +9393,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8878,6 +9410,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8898,6 +9431,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8914,6 +9448,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8934,6 +9469,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8950,6 +9486,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8970,6 +9507,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -8986,6 +9524,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -9007,6 +9546,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9023,6 +9563,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -9044,6 +9585,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9060,6 +9602,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -9081,6 +9624,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9097,6 +9641,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9111,6 +9656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9127,6 +9673,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9141,6 +9688,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9157,6 +9705,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9171,6 +9720,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9187,6 +9737,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9203,6 +9754,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9219,6 +9771,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9235,6 +9788,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9251,6 +9805,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9267,6 +9822,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9283,6 +9839,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9300,6 +9857,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9316,6 +9874,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9333,6 +9892,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9349,6 +9909,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9366,6 +9927,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9382,6 +9944,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9400,6 +9963,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9416,6 +9980,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9434,6 +9999,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9450,6 +10016,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9468,6 +10035,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9484,6 +10052,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9503,6 +10072,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9519,6 +10089,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9538,6 +10109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9554,6 +10126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9573,6 +10146,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9589,6 +10163,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9609,6 +10184,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9625,6 +10201,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9645,6 +10222,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9661,6 +10239,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9681,6 +10260,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9697,6 +10277,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) @@ -9718,6 +10299,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9734,6 +10316,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v 
v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) @@ -9755,6 +10338,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9771,6 +10355,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) @@ -9792,6 +10377,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -9808,6 +10394,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -9822,6 +10409,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9838,6 +10426,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -9852,6 +10441,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9868,6 +10458,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -9882,6 +10473,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9898,6 +10490,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -9914,6 +10507,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t 
; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9930,6 +10524,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -9946,6 +10541,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9962,6 +10558,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -9978,6 +10575,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -9994,6 +10592,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10011,6 +10610,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10027,6 +10627,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10044,6 +10645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10060,6 +10662,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10077,6 +10680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10093,6 +10697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10111,6 +10716,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10127,6 +10733,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10145,6 +10752,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10161,6 +10769,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10179,6 +10788,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10195,6 +10805,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10214,6 +10825,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10230,6 +10842,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10249,6 +10862,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10265,6 +10879,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10284,6 +10899,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10300,6 +10916,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10320,6 +10937,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10336,6 +10954,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10356,6 +10975,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10372,6 +10992,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10392,6 +11013,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10408,6 +11030,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) @@ -10429,6 +11052,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10445,6 +11069,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) @@ -10466,6 +11091,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10482,6 +11108,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) @@ -10503,6 +11130,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10519,6 +11147,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10533,6 +11162,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10549,6 +11179,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10563,6 +11194,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10579,6 +11211,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10593,6 +11226,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10609,6 +11243,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10625,6 +11260,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10641,6 +11277,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10657,6 +11294,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10673,6 +11311,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10688,6 +11327,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10704,6 +11344,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10721,6 +11362,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10737,6 +11379,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10754,6 +11397,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10770,6 +11414,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10787,6 +11432,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -10803,6 +11449,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float* %base, %index, i32 %vl) @@ -10817,6 +11464,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ 
-10833,6 +11481,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float* %base, %index, i32 %vl) @@ -10847,6 +11496,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10863,6 +11513,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float* %base, %index, i32 %vl) @@ -10877,6 +11528,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -10893,6 +11545,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -10907,6 +11560,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -10923,6 +11577,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -10937,6 +11592,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -10953,6 +11609,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -10967,6 +11624,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -10983,6 +11641,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double* 
%base, %index, i32 %vl) @@ -10999,6 +11658,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11015,6 +11675,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -11031,6 +11692,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11047,6 +11709,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -11063,6 +11726,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11079,6 +11743,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -11096,6 +11761,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11112,6 +11778,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -11129,6 +11796,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ -11145,6 +11813,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -11162,6 +11831,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl, i32 1) @@ 
-11178,6 +11848,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11192,6 +11863,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11208,6 +11880,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11222,6 +11895,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11238,6 +11912,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11252,6 +11927,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11268,6 +11944,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11284,6 +11961,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11300,6 +11978,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11316,6 +11995,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11332,6 +12012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11347,6 +12028,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; 
CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11363,6 +12045,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11380,6 +12063,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11396,6 +12080,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11413,6 +12098,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11429,6 +12115,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11446,6 +12133,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11462,6 +12150,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11480,6 +12169,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11496,6 +12186,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11514,6 +12205,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11530,6 +12222,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; 
CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11548,6 +12241,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11564,6 +12258,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11583,6 +12278,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11599,6 +12295,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11618,6 +12315,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11634,6 +12332,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11653,6 +12352,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11669,6 +12369,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11689,6 +12390,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11705,6 +12407,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11725,6 +12428,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; 
CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11741,6 +12445,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11761,6 +12466,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11777,6 +12483,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11798,6 +12505,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11814,6 +12522,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11835,6 +12544,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11851,6 +12561,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11872,6 +12583,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11888,6 +12600,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -11902,6 +12615,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32( %val, 
%val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11918,6 +12632,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -11932,6 +12647,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11948,6 +12664,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -11962,6 +12679,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -11978,6 +12696,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -11994,6 +12713,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12010,6 +12730,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12026,6 +12747,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12042,6 +12764,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12058,6 +12781,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12074,6 +12798,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12091,6 +12816,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12107,6 +12833,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12124,6 +12851,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12140,6 +12868,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12157,6 +12886,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12173,6 +12903,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12191,6 +12922,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12207,6 +12939,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12225,6 +12958,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12241,6 +12975,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12259,6 +12994,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12275,6 +13011,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12294,6 +13031,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12310,6 +13048,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12329,6 +13068,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12345,6 +13085,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12364,6 +13105,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12380,6 +13122,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12400,6 +13143,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12416,6 +13160,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12436,6 +13181,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12452,6 +13198,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ 
-12472,6 +13219,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12488,6 +13236,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) @@ -12509,6 +13258,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12525,6 +13275,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) @@ -12546,6 +13297,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12562,6 +13314,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) @@ -12583,6 +13336,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl, i32 1) @@ -12599,6 +13353,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12613,6 +13368,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12629,6 +13385,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12643,6 +13400,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12659,6 +13417,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) @@ -12673,6 +13432,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12689,6 +13449,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12705,6 +13466,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12721,6 +13483,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12737,6 +13500,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12753,6 +13517,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) @@ -12769,6 +13534,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12785,6 +13551,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12802,6 +13569,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1) @@ -12818,6 +13586,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float* %base, %index, i32 %vl)
@@ -12835,6 +13604,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
@@ -12851,6 +13621,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8
; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float* %base, %index, i32 %vl)
@@ -12868,6 +13639,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl, i32 1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll
@@ -11,6 +11,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8
; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 %vl)
@@ -25,6 +26,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -41,6 +43,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8
; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16* %base, %index, i64 %vl)
@@ -55,6 +58,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -71,6 +75,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16* %base, %index, i64 %vl)
@@ -85,6 +90,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1)
@@ -101,6 +107,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8
; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl)
@@ -115,6 +122,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:
vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -131,6 +139,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -145,6 +154,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -161,6 +171,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -175,6 +186,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -191,6 +203,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -205,6 +218,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -221,6 +235,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -237,6 +252,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -253,6 +269,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -269,6 +286,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -285,6 +303,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; 
CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -300,6 +319,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -316,6 +336,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -332,6 +353,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -348,6 +370,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -365,6 +388,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -381,6 +405,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -398,6 +423,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -414,6 +440,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -431,6 +458,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -447,6 +475,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -464,6 +493,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -480,6 +510,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -494,6 +525,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -510,6 +542,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -524,6 +557,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -540,6 +574,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -554,6 +589,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -570,6 +606,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -585,6 +622,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -601,6 +639,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -617,6 +656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -633,6 +673,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -648,6 +689,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -664,6 +706,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -681,6 +724,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -697,6 +741,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -714,6 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -730,6 +776,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -746,6 +793,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -762,6 +810,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -776,6 +825,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -792,6 +842,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -806,6 +857,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -822,6 +874,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -836,6 +889,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -852,6 +906,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -866,6 +921,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -882,6 +938,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -898,6 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -914,6 +972,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -930,6 +989,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -946,6 +1006,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -962,6 +1023,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -978,6 +1040,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -994,6 +1057,7 @@ ; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1010,6 +1074,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1027,6 +1092,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1043,6 +1109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1060,6 +1127,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1076,6 +1144,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1093,6 +1162,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1109,6 +1179,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1126,6 +1197,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1142,6 +1214,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1160,6 +1233,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1176,6 +1250,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1194,6 +1269,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1210,6 +1286,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1228,6 +1305,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1244,6 +1322,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1262,6 +1341,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1278,6 +1358,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1297,6 +1378,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1313,6 +1395,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1332,6 +1415,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1348,6 +1432,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1367,6 +1452,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1383,6 +1469,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1402,6 +1489,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1418,6 +1506,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1438,6 +1527,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1454,6 +1544,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1474,6 +1565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1490,6 +1582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1510,6 +1603,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1526,6 +1620,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1546,6 +1641,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1562,6 +1658,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), 
v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1583,6 +1680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1599,6 +1697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1620,6 +1719,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1636,6 +1736,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1657,6 +1758,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1673,6 +1775,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1694,6 +1797,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -1710,6 +1814,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -1724,6 +1829,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1740,6 +1846,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -1754,6 +1861,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; 
CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1770,6 +1878,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -1784,6 +1893,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1800,6 +1910,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -1814,6 +1925,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1830,6 +1942,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -1846,6 +1959,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1862,6 +1976,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -1878,6 +1993,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1894,6 +2010,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -1910,6 +2027,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1926,6 +2044,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -1942,6 +2061,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1958,6 +2078,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -1975,6 +2096,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -1991,6 +2113,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -2008,6 +2131,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2024,6 +2148,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -2041,6 +2166,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2057,6 +2183,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -2074,6 +2201,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2090,6 +2218,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -2108,6 +2237,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2124,6 +2254,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -2142,6 +2273,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2158,6 +2290,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -2176,6 +2309,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2192,6 +2326,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -2210,6 +2345,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2226,6 +2362,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -2245,6 +2382,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2261,6 +2399,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -2280,6 +2419,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2296,6 +2436,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -2315,6 +2456,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2331,6 +2473,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -2350,6 +2493,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2366,6 +2510,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -2386,6 +2531,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2402,6 +2548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -2422,6 +2569,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2438,6 +2586,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -2458,6 +2607,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2474,6 +2624,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -2494,6 +2645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call 
{,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2510,6 +2662,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) @@ -2531,6 +2684,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2547,6 +2701,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) @@ -2568,6 +2723,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2584,6 +2740,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) @@ -2605,6 +2762,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2621,6 +2779,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) @@ -2642,6 +2801,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -2658,6 +2818,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2672,6 +2833,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2688,6 +2850,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), 
v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2702,6 +2865,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2718,6 +2882,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -2732,6 +2897,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2748,6 +2914,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -2762,6 +2929,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2778,6 +2946,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2794,6 +2963,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2810,6 +2980,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2826,6 +2997,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2842,6 +3014,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -2857,6 +3030,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2873,6 +3047,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -2888,6 +3063,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2904,6 +3080,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2921,6 +3098,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2937,6 +3115,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2954,6 +3133,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -2970,6 +3150,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -2986,6 +3167,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -3002,6 +3184,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -3019,6 +3202,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -3035,6 +3219,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v 
v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3049,6 +3234,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3065,6 +3251,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3079,6 +3266,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3095,6 +3283,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3109,6 +3298,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3125,6 +3315,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3139,6 +3330,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3155,6 +3347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3170,6 +3363,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3186,6 +3380,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3202,6 +3397,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3218,6 +3414,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3233,6 +3430,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3249,6 +3447,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3265,6 +3464,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3281,6 +3481,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3298,6 +3499,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3314,6 +3516,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3331,6 +3534,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3347,6 +3551,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3363,6 +3568,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3379,6 +3585,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8* 
%base, %index, i64 %vl) @@ -3396,6 +3603,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3412,6 +3620,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3430,6 +3639,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3446,6 +3656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3464,6 +3675,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3480,6 +3692,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3497,6 +3710,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3513,6 +3727,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3531,6 +3746,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3547,6 +3763,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3566,6 +3783,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, 
%mask, i64 %vl, i64 1) @@ -3582,6 +3800,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3601,6 +3820,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3617,6 +3837,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3636,6 +3857,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3652,6 +3874,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3671,6 +3894,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3687,6 +3911,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3707,6 +3932,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3723,6 +3949,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3743,6 +3970,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3759,6 +3987,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3779,6 +4008,7 
@@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3795,6 +4025,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3815,6 +4046,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3831,6 +4063,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) @@ -3852,6 +4085,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3868,6 +4102,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) @@ -3889,6 +4124,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3905,6 +4141,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) @@ -3926,6 +4163,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3942,6 +4180,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) @@ -3963,6 +4202,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16( %val, %val, 
%val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -3979,6 +4219,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -3993,6 +4234,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4009,6 +4251,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4023,6 +4266,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4039,6 +4283,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4053,6 +4298,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4069,6 +4315,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4083,6 +4330,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4099,6 +4347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4115,6 +4364,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4131,6 +4381,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16* %base, %index, i64 
%vl) @@ -4147,6 +4398,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4163,6 +4415,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4179,6 +4432,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4195,6 +4449,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4211,6 +4466,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4227,6 +4483,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4244,6 +4501,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4260,6 +4518,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4277,6 +4536,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4293,6 +4553,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4310,6 +4571,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4326,6 +4588,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; 
CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4343,6 +4606,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4359,6 +4623,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4377,6 +4642,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4393,6 +4659,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4411,6 +4678,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4427,6 +4695,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4445,6 +4714,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4461,6 +4731,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4479,6 +4750,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4495,6 +4767,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4514,6 +4787,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4530,6 +4804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4549,6 +4824,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4565,6 +4841,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4584,6 +4861,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4600,6 +4878,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4619,6 +4898,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4635,6 +4915,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4655,6 +4936,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4671,6 +4953,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4691,6 +4974,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4707,6 +4991,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: 
vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4727,6 +5012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4743,6 +5029,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4763,6 +5050,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4779,6 +5067,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) @@ -4800,6 +5089,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4816,6 +5106,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) @@ -4837,6 +5128,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4853,6 +5145,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) @@ -4874,6 +5167,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4890,6 +5184,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) @@ -4911,6 +5206,7 @@ ; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -4927,6 +5223,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -4941,6 +5238,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -4957,6 +5255,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -4971,6 +5270,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -4987,6 +5287,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5001,6 +5302,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5017,6 +5319,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5031,6 +5334,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5047,6 +5351,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5063,6 +5368,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5079,6 +5385,7 @@ ; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5095,6 +5402,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5111,6 +5419,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5127,6 +5436,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5143,6 +5453,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5158,6 +5469,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5174,6 +5486,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5191,6 +5504,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5207,6 +5521,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5224,6 +5539,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5240,6 +5556,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5257,6 +5574,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5273,6 +5591,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5290,6 +5609,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5306,6 +5626,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5324,6 +5645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5340,6 +5662,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5358,6 +5681,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5374,6 +5698,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5392,6 +5717,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5408,6 +5734,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5426,6 +5753,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5442,6 +5770,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5461,6 +5790,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5477,6 +5807,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5496,6 +5827,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5512,6 +5844,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5531,6 +5864,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5547,6 +5881,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5566,6 +5901,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5582,6 +5918,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5602,6 +5939,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5618,6 +5956,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5638,6 +5977,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: 
%0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5654,6 +5994,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5674,6 +6015,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5690,6 +6032,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5710,6 +6053,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5726,6 +6070,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5747,6 +6092,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5763,6 +6109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5784,6 +6131,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5800,6 +6148,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5821,6 +6170,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5837,6 +6187,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, 
(a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5858,6 +6209,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -5874,6 +6226,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -5888,6 +6241,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -5904,6 +6258,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -5918,6 +6273,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -5934,6 +6290,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -5948,6 +6305,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -5964,6 +6322,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -5978,6 +6337,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -5994,6 +6354,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6009,6 +6370,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6025,6 +6387,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6041,6 +6404,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6057,6 +6421,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6072,6 +6437,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6088,6 +6454,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6103,6 +6470,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6119,6 +6487,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6136,6 +6505,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6152,6 +6522,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6169,6 +6540,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6185,6 +6557,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail 
call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6201,6 +6574,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6217,6 +6591,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6233,6 +6608,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6249,6 +6625,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6267,6 +6644,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6283,6 +6661,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6301,6 +6680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6317,6 +6697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6334,6 +6715,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6350,6 +6732,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6367,6 +6750,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6383,6 +6767,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6402,6 +6787,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6418,6 +6804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6437,6 +6824,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6453,6 +6841,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6471,6 +6860,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6487,6 +6877,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6506,6 +6897,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6522,6 +6914,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6542,6 +6935,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6558,6 +6952,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6578,6 +6973,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6594,6 +6990,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6613,6 +7010,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v7, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6629,6 +7027,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6649,6 +7048,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6665,6 +7065,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6686,6 +7087,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6702,6 +7104,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6723,6 +7126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6739,6 +7143,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6759,6 +7164,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v7, 
(a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6775,6 +7181,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6796,6 +7203,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -6812,6 +7220,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32(i64* %base, %index, i64 %vl) @@ -6826,6 +7235,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -6842,6 +7252,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8(i64* %base, %index, i64 %vl) @@ -6856,6 +7267,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -6872,6 +7284,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64(i64* %base, %index, i64 %vl) @@ -6886,6 +7299,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -6902,6 +7316,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16(i64* %base, %index, i64 %vl) @@ -6916,6 +7331,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 
%vl, i64 1) @@ -6932,6 +7348,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -6946,6 +7363,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -6962,6 +7380,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -6976,6 +7395,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -6992,6 +7412,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7006,6 +7427,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7022,6 +7444,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7036,6 +7459,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7052,6 +7476,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7067,6 +7492,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7083,6 +7509,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7099,6 +7526,7 @@ ; CHECK-NEXT: vsetvli 
zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7115,6 +7543,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7130,6 +7559,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7146,6 +7576,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7162,6 +7593,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7178,6 +7610,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7195,6 +7628,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7211,6 +7645,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7228,6 +7663,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7244,6 +7680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7260,6 +7697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7276,6 +7714,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; 
CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7293,6 +7732,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7309,6 +7749,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7327,6 +7768,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7343,6 +7785,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7361,6 +7804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7377,6 +7821,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7394,6 +7839,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7410,6 +7856,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7428,6 +7875,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7444,6 +7892,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7463,6 +7912,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, 
v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7479,6 +7929,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7498,6 +7949,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7514,6 +7966,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7533,6 +7986,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7549,6 +8003,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7568,6 +8023,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7584,6 +8040,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7604,6 +8061,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7620,6 +8078,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7640,6 +8099,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7656,6 +8116,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; 
CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7676,6 +8137,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7692,6 +8154,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7712,6 +8175,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7728,6 +8192,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7749,6 +8214,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7765,6 +8231,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7786,6 +8253,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7802,6 +8270,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7823,6 +8292,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7839,6 +8309,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7860,6 +8331,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -7876,6 +8348,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -7890,6 +8363,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -7906,6 +8380,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -7920,6 +8395,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -7936,6 +8412,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -7950,6 +8427,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -7966,6 +8444,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -7980,6 +8459,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -7996,6 +8476,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8012,6 +8493,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8028,6 +8510,7 @@ ; CHECK-NEXT: vsetvli zero, 
a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8044,6 +8527,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8060,6 +8544,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8076,6 +8561,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8092,6 +8578,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8108,6 +8595,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8124,6 +8612,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8141,6 +8630,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8157,6 +8647,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8174,6 +8665,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8190,6 +8682,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8207,6 +8700,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = 
tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8223,6 +8717,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8240,6 +8735,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8256,6 +8752,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8274,6 +8771,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8290,6 +8788,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8308,6 +8807,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8324,6 +8824,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8342,6 +8843,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8358,6 +8860,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8376,6 +8879,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8392,6 +8896,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i64(i8* %base, 
%index, i64 %vl) @@ -8411,6 +8916,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8427,6 +8933,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8446,6 +8953,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8462,6 +8970,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8481,6 +8990,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8497,6 +9007,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8516,6 +9027,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8532,6 +9044,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8552,6 +9065,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8568,6 +9082,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8588,6 +9103,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* 
%base, %index, %mask, i64 %vl, i64 1) @@ -8604,6 +9120,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8624,6 +9141,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8640,6 +9158,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8660,6 +9179,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8676,6 +9196,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) @@ -8697,6 +9218,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8713,6 +9235,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) @@ -8734,6 +9257,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8750,6 +9274,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) @@ -8771,6 +9296,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8787,6 +9313,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} 
@llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) @@ -8808,6 +9335,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8824,6 +9352,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -8838,6 +9367,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8854,6 +9384,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -8868,6 +9399,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8884,6 +9416,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -8898,6 +9431,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8914,6 +9448,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -8928,6 +9463,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -8944,6 +9480,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -8960,6 +9497,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* 
%base, %index, %mask, i64 %vl, i64 1) @@ -8976,6 +9514,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -8992,6 +9531,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9008,6 +9548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9024,6 +9565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9040,6 +9582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9055,6 +9598,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9071,6 +9615,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9088,6 +9633,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9104,6 +9650,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9121,6 +9668,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9137,6 +9685,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9154,6 +9703,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v 
v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9170,6 +9720,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9187,6 +9738,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9203,6 +9755,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9221,6 +9774,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9237,6 +9791,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9255,6 +9810,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9271,6 +9827,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9289,6 +9846,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9305,6 +9863,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9323,6 +9882,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9339,6 +9899,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9358,6 +9919,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9374,6 +9936,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9393,6 +9956,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9409,6 +9973,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9428,6 +9993,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9444,6 +10010,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9463,6 +10030,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9479,6 +10047,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9499,6 +10068,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9515,6 +10085,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9535,6 +10106,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9551,6 +10123,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9571,6 +10144,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9587,6 +10161,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9607,6 +10182,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9623,6 +10199,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) @@ -9644,6 +10221,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9660,6 +10238,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) @@ -9681,6 +10260,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9697,6 +10277,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) @@ -9718,6 +10299,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9734,6 +10316,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), 
v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) @@ -9755,6 +10338,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9771,6 +10355,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32* %base, %index, i64 %vl) @@ -9785,6 +10370,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -9801,6 +10387,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32* %base, %index, i64 %vl) @@ -9815,6 +10402,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -9831,6 +10419,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64(i32* %base, %index, i64 %vl) @@ -9845,6 +10434,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -9861,6 +10451,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32* %base, %index, i64 %vl) @@ -9875,6 +10466,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl, i64 1) @@ -9891,6 +10483,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8* %base, %index, i64 %vl) @@ -9905,6 +10498,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, 
v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9921,6 +10515,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8* %base, %index, i64 %vl) @@ -9935,6 +10530,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl, i64 1) @@ -9951,6 +10547,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -9965,6 +10562,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -9981,6 +10579,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -9995,6 +10594,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10011,6 +10611,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10025,6 +10626,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10041,6 +10643,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10055,6 +10658,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64( %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10071,6 +10675,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; 
CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10087,6 +10692,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10103,6 +10709,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10119,6 +10726,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10135,6 +10743,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10151,6 +10760,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10167,6 +10777,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10182,6 +10793,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10198,6 +10810,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10215,6 +10828,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10231,6 +10845,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10248,6 +10863,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10264,6 +10880,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10281,6 +10898,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10297,6 +10915,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10314,6 +10933,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10330,6 +10950,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10348,6 +10969,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10364,6 +10986,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10382,6 +11005,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10398,6 +11022,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10416,6 +11041,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10432,6 +11058,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10450,6 +11077,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10466,6 +11094,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10485,6 +11114,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10501,6 +11131,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10520,6 +11151,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10536,6 +11168,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10555,6 +11188,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10571,6 +11205,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10590,6 +11225,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10606,6 +11242,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10626,6 +11263,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10642,6 +11280,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10662,6 +11301,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10678,6 +11318,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10698,6 +11339,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10714,6 +11356,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10734,6 +11377,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10750,6 +11394,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) @@ -10771,6 +11416,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10787,6 +11433,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) @@ -10808,6 +11455,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10824,6 +11472,7 @@ ; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) @@ -10845,6 +11494,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10861,6 +11511,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) @@ -10882,6 +11533,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl, i64 1) @@ -10898,6 +11550,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -10912,6 +11565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -10928,6 +11582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -10942,6 +11597,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -10958,6 +11614,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -10972,6 +11629,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -10988,6 +11646,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ 
-11002,6 +11661,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64( %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11018,6 +11678,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -11034,6 +11695,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11050,6 +11712,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -11066,6 +11729,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11082,6 +11746,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11098,6 +11763,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11114,6 +11780,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11130,6 +11797,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11146,6 +11814,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -11163,6 +11832,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11179,6 +11849,7 @@ ; CHECK-NEXT: vsetvli zero, 
a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -11196,6 +11867,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11212,6 +11884,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11229,6 +11902,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11245,6 +11919,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11262,6 +11937,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl, i64 1) @@ -11278,6 +11954,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half* %base, %index, i64 %vl) @@ -11292,6 +11969,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -11308,6 +11986,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half* %base, %index, i64 %vl) @@ -11322,6 +12001,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -11338,6 +12018,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half* %base, %index, i64 %vl) @@ -11352,6 +12033,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: 
vluxseg2ei32.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -11368,6 +12050,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double* %base, %index, i64 %vl) @@ -11382,6 +12065,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11398,6 +12082,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double* %base, %index, i64 %vl) @@ -11412,6 +12097,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11428,6 +12114,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64(double* %base, %index, i64 %vl) @@ -11442,6 +12129,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11458,6 +12146,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double* %base, %index, i64 %vl) @@ -11472,6 +12161,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11488,6 +12178,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11502,6 +12193,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64( %val, %val, double* %base, %index, %mask, i64 %vl, 
i64 1) @@ -11518,6 +12210,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11532,6 +12225,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11548,6 +12242,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11562,6 +12257,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11578,6 +12274,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11592,6 +12289,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11608,6 +12306,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11624,6 +12323,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11640,6 +12340,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11656,6 +12357,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11672,6 +12374,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11688,6 +12391,7 @@ ; CHECK-NEXT: 
vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11704,6 +12408,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11720,6 +12425,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11736,6 +12442,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11753,6 +12460,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11769,6 +12477,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11786,6 +12495,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11802,6 +12512,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11819,6 +12530,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11835,6 +12547,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11852,6 +12565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11868,6 +12582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, 
mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11886,6 +12601,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11902,6 +12618,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11920,6 +12637,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11936,6 +12654,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11954,6 +12673,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -11970,6 +12690,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11988,6 +12709,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12004,6 +12726,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12023,6 +12746,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12039,6 +12763,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12058,6 +12783,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; 
CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12074,6 +12800,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12093,6 +12820,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12109,6 +12837,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12128,6 +12857,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12144,6 +12874,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12164,6 +12895,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12180,6 +12912,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12200,6 +12933,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12216,6 +12950,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12236,6 +12971,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, 
%index, %mask, i64 %vl, i64 1) @@ -12252,6 +12988,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12272,6 +13009,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12288,6 +13026,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12309,6 +13048,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12325,6 +13065,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12346,6 +13087,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12362,6 +13104,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12383,6 +13126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12399,6 +13143,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12420,6 +13165,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -12436,6 +13182,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12450,6 +13197,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12466,6 +13214,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12480,6 +13229,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12496,6 +13246,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12510,6 +13261,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12526,6 +13278,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12540,6 +13293,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12556,6 +13310,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12572,6 +13327,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12588,6 +13344,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12604,6 +13361,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12620,6 +13378,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12636,6 +13395,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12652,6 +13412,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12667,6 +13428,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12683,6 +13445,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12700,6 +13463,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12716,6 +13480,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12733,6 +13498,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12749,6 +13515,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12766,6 +13533,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12782,6 +13550,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call 
{,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12799,6 +13568,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12815,6 +13585,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12833,6 +13604,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12849,6 +13621,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12867,6 +13640,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12883,6 +13657,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12901,6 +13676,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12917,6 +13693,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12935,6 +13712,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12951,6 +13729,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12970,6 +13749,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -12986,6 +13766,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13005,6 +13786,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13021,6 +13803,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13040,6 +13823,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13056,6 +13840,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13075,6 +13860,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13091,6 +13877,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13111,6 +13898,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13127,6 +13915,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13147,6 +13936,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13163,6 +13953,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13183,6 +13974,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13199,6 +13991,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13219,6 +14012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13235,6 +14029,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13256,6 +14051,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13272,6 +14068,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13293,6 +14090,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13309,6 +14107,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13330,6 +14129,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13346,6 +14146,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13367,6 +14168,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, 
ta, mu ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -13383,6 +14185,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13397,6 +14200,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13413,6 +14217,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13427,6 +14232,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13443,6 +14249,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13457,6 +14264,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13473,6 +14281,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13487,6 +14296,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13503,6 +14313,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13519,6 +14330,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13535,6 +14347,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13551,6 +14364,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13567,6 +14381,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13583,6 +14398,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13599,6 +14415,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13615,6 +14432,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13631,6 +14449,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13648,6 +14467,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13664,6 +14484,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13681,6 +14502,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13697,6 +14519,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13714,6 +14537,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; 
CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13730,6 +14554,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13747,6 +14572,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13763,6 +14589,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13781,6 +14608,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13797,6 +14625,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13815,6 +14644,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13831,6 +14661,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13849,6 +14680,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13865,6 +14697,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -13883,6 +14716,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13899,6 +14733,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -13918,6 +14753,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13934,6 +14770,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -13953,6 +14790,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -13969,6 +14807,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -13988,6 +14827,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14004,6 +14844,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -14023,6 +14864,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14039,6 +14881,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -14059,6 +14902,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14075,6 +14919,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -14095,6 +14940,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: 
vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14111,6 +14957,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -14131,6 +14978,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14147,6 +14995,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -14167,6 +15016,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14183,6 +15033,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) @@ -14204,6 +15055,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14220,6 +15072,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) @@ -14241,6 +15094,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14257,6 +15111,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) @@ -14278,6 +15133,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, 
%val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14294,6 +15150,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) @@ -14315,6 +15172,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -14331,6 +15189,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14345,6 +15204,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14361,6 +15221,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14375,6 +15236,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14391,6 +15253,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14405,6 +15268,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14421,6 +15285,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14435,6 +15300,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14451,6 +15317,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} 
@llvm.riscv.vluxseg3.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14467,6 +15334,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14483,6 +15351,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14499,6 +15368,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14515,6 +15385,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14531,6 +15402,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14547,6 +15419,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14563,6 +15436,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14579,6 +15453,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14596,6 +15471,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14612,6 +15488,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14629,6 +15506,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, 
%mask, i64 %vl, i64 1) @@ -14645,6 +15523,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14662,6 +15541,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14678,6 +15558,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14695,6 +15576,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14711,6 +15593,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14729,6 +15612,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14745,6 +15629,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14763,6 +15648,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14779,6 +15665,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14797,6 +15684,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14813,6 +15701,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14831,6 
+15720,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14847,6 +15737,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -14866,6 +15757,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14882,6 +15774,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -14901,6 +15794,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14917,6 +15811,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -14936,6 +15831,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14952,6 +15848,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -14971,6 +15868,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -14987,6 +15885,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -15007,6 +15906,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, 
%val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15023,6 +15923,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -15043,6 +15944,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15059,6 +15961,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -15079,6 +15982,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15095,6 +15999,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -15115,6 +16020,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15131,6 +16037,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) @@ -15152,6 +16059,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15168,6 +16076,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) @@ -15189,6 +16098,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15205,6 +16115,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) @@ -15226,6 +16137,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15242,6 +16154,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) @@ -15263,6 +16176,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15279,6 +16193,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15293,6 +16208,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15309,6 +16225,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15323,6 +16240,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15339,6 +16257,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15353,6 +16272,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15369,6 +16289,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15383,6 +16304,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, 
(a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15399,6 +16321,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15415,6 +16338,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15431,6 +16355,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15447,6 +16372,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15463,6 +16389,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15478,6 +16405,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15494,6 +16422,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15509,6 +16438,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15525,6 +16455,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15542,6 +16473,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15558,6 +16490,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, 
ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15575,6 +16508,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15591,6 +16525,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15607,6 +16542,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15623,6 +16559,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15640,6 +16577,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -15656,6 +16594,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float* %base, %index, i64 %vl) @@ -15670,6 +16609,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15686,6 +16626,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float* %base, %index, i64 %vl) @@ -15700,6 +16641,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15716,6 +16658,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64(float* %base, %index, i64 %vl) @@ -15730,6 +16673,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, 
m4, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15746,6 +16690,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float* %base, %index, i64 %vl) @@ -15760,6 +16705,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -15776,6 +16722,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -15790,6 +16737,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15806,6 +16754,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -15820,6 +16769,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15836,6 +16786,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -15850,6 +16801,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15866,6 +16818,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -15880,6 +16833,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64( %val, %val, double* 
%base, %index, %mask, i64 %vl, i64 1) @@ -15896,6 +16850,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -15912,6 +16867,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15928,6 +16884,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -15944,6 +16901,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15960,6 +16918,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -15976,6 +16935,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -15992,6 +16952,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -16008,6 +16969,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -16024,6 +16986,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -16041,6 +17004,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -16057,6 +17021,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -16074,6 +17039,7 @@ ; CHECK-NEXT: vsetvli zero, 
a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -16090,6 +17056,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -16107,6 +17074,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -16123,6 +17091,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -16140,6 +17109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl, i64 1) @@ -16156,6 +17126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16170,6 +17141,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16186,6 +17158,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16200,6 +17173,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16216,6 +17190,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16230,6 +17205,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16246,6 +17222,7 @@ ; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16260,6 +17237,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16276,6 +17254,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16291,6 +17270,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16307,6 +17287,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16323,6 +17304,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16339,6 +17321,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16354,6 +17337,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16370,6 +17354,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16386,6 +17371,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16402,6 +17388,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16419,6 +17406,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: 
vluxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16435,6 +17423,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16452,6 +17441,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16468,6 +17458,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16484,6 +17475,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16500,6 +17492,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16517,6 +17510,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16533,6 +17527,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16551,6 +17546,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16567,6 +17563,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16585,6 +17582,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16601,6 +17599,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; 
CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16618,6 +17617,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16634,6 +17634,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16652,6 +17653,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16668,6 +17670,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16687,6 +17690,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16703,6 +17707,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16722,6 +17727,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16738,6 +17744,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16757,6 +17764,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16773,6 +17781,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16792,6 +17801,7 @@ ; CHECK-NEXT: vsetvli zero, 
a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16808,6 +17818,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16828,6 +17839,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16844,6 +17856,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16864,6 +17877,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16880,6 +17894,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16900,6 +17915,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16916,6 +17932,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16936,6 +17953,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16952,6 +17970,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16973,6 +17992,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, 
%val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -16989,6 +18009,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -17010,6 +18031,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17026,6 +18048,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -17047,6 +18070,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17063,6 +18087,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -17084,6 +18109,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17100,6 +18126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17114,6 +18141,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17130,6 +18158,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17144,6 +18173,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17160,6 +18190,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17174,6 +18205,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17190,6 +18222,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17204,6 +18237,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17220,6 +18254,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17236,6 +18271,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17252,6 +18288,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17268,6 +18305,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17284,6 +18322,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17300,6 +18339,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17316,6 +18356,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17331,6 +18372,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} 
@llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17347,6 +18389,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17364,6 +18407,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17380,6 +18424,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17397,6 +18442,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17413,6 +18459,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17430,6 +18477,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17446,6 +18494,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17463,6 +18512,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17479,6 +18529,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17497,6 +18548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17513,6 +18565,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} 
@llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17531,6 +18584,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17547,6 +18601,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17565,6 +18620,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17581,6 +18637,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17599,6 +18656,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg5ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17615,6 +18673,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17634,6 +18693,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17650,6 +18710,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17669,6 +18730,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17685,6 +18747,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17704,6 +18767,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17720,6 +18784,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17739,6 +18804,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg6ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17755,6 +18821,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17775,6 +18842,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17791,6 +18859,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17811,6 +18880,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17827,6 +18897,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17847,6 +18918,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17863,6 +18935,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -17883,6 +18956,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg7ei64.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17899,6 +18973,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 ; CHECK-NEXT: 
vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) @@ -17920,6 +18995,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17936,6 +19012,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) @@ -17957,6 +19034,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -17973,6 +19051,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) @@ -17994,6 +19073,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -18010,6 +19090,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) @@ -18031,6 +19112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v13 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl, i64 1) @@ -18047,6 +19129,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18061,6 +19144,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18077,6 +19161,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18091,6 +19176,7 @@ ; CHECK-NEXT: vsetvli zero, 
a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18107,6 +19193,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18121,6 +19208,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18137,6 +19225,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) @@ -18151,6 +19240,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18167,6 +19257,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18183,6 +19274,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18199,6 +19291,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18215,6 +19308,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18231,6 +19325,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18246,6 +19341,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) 
@@ -18262,6 +19358,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) @@ -18278,6 +19375,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18294,6 +19392,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18311,6 +19410,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18327,6 +19427,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18344,6 +19445,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18360,6 +19462,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18377,6 +19480,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) @@ -18393,6 +19497,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) @@ -18410,6 +19515,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl, i64 1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll @@ -12,6 +12,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %x = mul %va, %vb %y = add %x, %vc @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmadd.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -36,6 +38,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmadd.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vc %y = add %x, %vb @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmacc.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -60,6 +64,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %va %y = add %x, %vc @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmadd.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -84,6 +90,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmacc.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %vc %y = add %x, %va @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmacc.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -108,6 +116,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmadd.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vc, %va %y = add %x, %vb @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmadd.vx v8, a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -132,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmacc.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vc, %vb %y = add %x, %va @@ -143,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmacc.vx v8, a0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -157,6 +169,7 @@ ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmacc.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vc, %vb %y = add %x, %va @@ -168,6 +181,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmacc.vx v8, a0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -181,6 +195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vb %y = add %x, %vc @@ -192,6 +207,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmadd.vx v8, a0, v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -205,6 +221,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmadd.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vc %y = add %x, %vb @@ -216,6 +233,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmacc.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -229,6 +247,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %va %y = add %x, %vc @@ -240,6 +259,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmadd.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -253,6 +273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmacc.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %vc %y = add %x, %va @@ -264,6 +285,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmacc.vx v8, a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -277,6 +299,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmadd.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vc, %va %y = add %x, %vb @@ -288,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmadd.vx v8, a0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -302,6 +326,7 @@ ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmacc.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vc, %vb %y = add %x, %va @@ -313,6 +338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmacc.vx v8, a0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -326,6 +352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vb %y = add %x, %vc @@ -337,6 +364,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmadd.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -350,6 +378,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmadd.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vc %y = add %x, %vb @@ -361,6 +390,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmacc.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -374,6 +404,7 
@@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmadd.vv v8, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %va %y = add %x, %vc @@ -385,6 +416,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmadd.vx v8, a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -398,6 +430,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmacc.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %vc %y = add %x, %va @@ -409,6 +442,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmacc.vx v8, a0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -423,6 +457,7 @@ ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmadd.vv v8, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vc, %va %y = add %x, %vb @@ -434,6 +469,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmadd.vx v8, a0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -447,6 +483,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmadd.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vb %y = add %x, %vc @@ -465,12 +502,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmadd.vv v8, v10, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmadd_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmadd.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -484,6 +523,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmadd.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vc %y = add %x, %vb @@ -502,12 +542,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmacc.vv v8, v10, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmadd_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vmacc.vx v8, a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -521,6 +563,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmadd.vv v8, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %va %y = add %x, %vc @@ -539,12 +582,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmadd.vv v8, v16, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmadd_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vmadd.vx v8, a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -559,6 +604,7 @@ ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, 
ta, mu ; CHECK-NEXT: vmacc.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %vc %y = add %x, %va @@ -577,12 +623,14 @@ ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmacc.vv v8, v16, v24 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmadd_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vmacc.vx v8, a0, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -27,6 +29,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -37,6 +40,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -47,6 +51,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb ret %vc @@ -57,6 +62,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = or %va, %vb ret %vc @@ -67,6 +73,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = or %va, %vb ret %vc @@ -77,6 +84,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = or %va, %vb ret %vc @@ -87,6 +95,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = or %va, %vb ret %vc @@ -97,6 +106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = or %va, %vb ret %vc @@ -107,6 +117,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -117,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -127,6 +139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -137,6 +150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -147,6 +161,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -157,6 +172,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmnand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -170,6 +186,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmnand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -183,6 +200,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmnand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -196,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmnand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -209,6 +228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmnand.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = and %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -222,6 +242,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmnor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = or %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -235,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmnor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = or %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -248,6 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmnor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = or %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -261,6 +284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmnor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = or %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -274,6 +298,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmnor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = or %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -287,6 +312,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -300,6 +326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -313,6 +340,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -326,6 +354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -339,6 +368,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb %head = insertelement undef, i1 1, i32 0 @@ -352,6 +382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmandnot.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -365,6 +396,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmandnot.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -378,6 +410,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmandnot.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -391,6 +424,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmandnot.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -404,6 +438,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmandnot.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -417,6 +452,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmornot.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -430,6 +466,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmornot.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -443,6 +480,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmornot.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -456,6 +494,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmornot.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -469,6 +508,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmornot.mm v0, v0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -18,6 +19,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, 
i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -32,6 +34,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -56,6 +60,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -70,6 +75,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -94,6 +101,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -108,6 +116,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -121,6 +130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -132,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -146,6 +157,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -170,6 +183,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -184,6 +198,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, 
v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -208,6 +224,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -222,6 +239,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -235,6 +253,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -246,6 +265,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -260,6 +280,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -284,6 +306,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -298,6 +321,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -311,6 +335,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -322,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -336,6 +362,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -349,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -360,6 +388,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -374,6 +403,7 @@ ; CHECK-NEXT: addi a0, 
zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -387,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -398,6 +429,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -412,6 +444,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -425,6 +458,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -436,6 +470,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -450,6 +485,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -463,6 +499,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -474,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -488,6 +526,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -501,6 +540,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -512,6 +552,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -526,6 +567,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -539,6 +581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = 
icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -550,6 +593,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -564,6 +608,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -577,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -588,6 +634,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -602,6 +649,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -615,6 +663,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -626,6 +675,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -640,6 +690,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -653,6 +704,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -664,6 +716,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -678,6 +731,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -691,6 +745,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -709,12 +764,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmax_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmax.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement 
undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -729,6 +786,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -742,6 +800,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -760,12 +819,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmax_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vmax.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -780,6 +841,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -793,6 +855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -811,12 +874,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmax_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vmax.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -831,6 +896,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -844,6 +910,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb %vc = select %cmp, %va, %vb @@ -862,12 +929,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmax_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vmax.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -882,6 +951,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmax.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: 
vmaxu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -18,6 +19,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -32,6 +34,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -56,6 +60,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -70,6 +75,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -94,6 +101,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -108,6 +116,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -121,6 +130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -132,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -146,6 +157,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -170,6 +183,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -184,6 +198,7 @@ ; CHECK-NEXT: addi a0, 
zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -208,6 +224,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -222,6 +239,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -235,6 +253,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -246,6 +265,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -260,6 +280,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -284,6 +306,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -298,6 +321,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -311,6 +335,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -322,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -336,6 +362,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -349,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp 
= icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -360,6 +388,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -374,6 +403,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -387,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -398,6 +429,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -412,6 +444,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -425,6 +458,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -436,6 +470,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -450,6 +485,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -463,6 +499,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -474,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -488,6 +526,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -501,6 +540,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -512,6 +552,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -526,6 +567,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, 
ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -539,6 +581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -550,6 +593,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -564,6 +608,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -577,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -588,6 +634,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -602,6 +649,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -615,6 +663,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -626,6 +675,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -640,6 +690,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -653,6 +704,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -664,6 +716,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -678,6 +731,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -691,6 +745,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, 
%vb @@ -709,12 +764,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmax_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmaxu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -729,6 +786,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -742,6 +800,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -760,12 +819,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmax_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vmaxu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -780,6 +841,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -793,6 +855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -811,12 +874,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmax_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vmaxu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -831,6 +896,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -844,6 +910,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmaxu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb %vc = select %cmp, %va, %vb @@ -862,12 +929,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmax_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vmaxu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -882,6 +951,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmaxu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
%head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -18,6 +19,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -32,6 +34,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -56,6 +60,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -70,6 +75,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -94,6 +101,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -108,6 +116,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -121,6 +130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -132,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -146,6 +157,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -170,6 +183,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -184,6 +198,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -208,6 +224,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -222,6 +239,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -235,6 +253,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -246,6 +265,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -260,6 +280,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -284,6 +306,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -298,6 +321,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -311,6 +335,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -322,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -336,6 +362,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, 
mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -349,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -360,6 +388,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -374,6 +403,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -387,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -398,6 +429,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -412,6 +444,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -425,6 +458,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -436,6 +470,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -450,6 +485,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -463,6 +499,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -474,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -488,6 +526,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -501,6 +540,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -512,6 
+552,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -526,6 +567,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -539,6 +581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -550,6 +593,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -564,6 +608,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -577,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -588,6 +634,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -602,6 +649,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -615,6 +663,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -626,6 +675,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -640,6 +690,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -653,6 +704,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -664,6 +716,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -678,6 +731,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -691,6 +745,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -709,12 +764,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmin_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmin.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -729,6 +786,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -742,6 +800,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -760,12 +819,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmin_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vmin.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -780,6 +841,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -793,6 +855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -811,12 +874,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmin_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vmin.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -831,6 +896,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -844,6 +910,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmin.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb %vc = select %cmp, %va, %vb @@ -862,12 +929,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmin_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vmin.vx v8, 
v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -882,6 +951,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmin.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -18,6 +19,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -32,6 +34,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -56,6 +60,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -70,6 +75,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -94,6 +101,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -108,6 +116,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -121,6 +130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -132,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -146,6 +157,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, 
e8, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -170,6 +183,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -184,6 +198,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -208,6 +224,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -222,6 +239,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -235,6 +253,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -246,6 +265,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -260,6 +280,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -284,6 +306,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -298,6 +321,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -311,6 +335,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb 
@@ -322,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -336,6 +362,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -349,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -360,6 +388,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -374,6 +403,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -387,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -398,6 +429,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -412,6 +444,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -425,6 +458,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -436,6 +470,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -450,6 +485,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -463,6 +499,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -474,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -488,6 +526,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -501,6 +540,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -512,6 +552,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -526,6 +567,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -539,6 +581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -550,6 +593,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -564,6 +608,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -577,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -588,6 +634,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -602,6 +649,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -615,6 +663,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -626,6 +675,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -640,6 +690,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -653,6 +704,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -664,6 +716,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -678,6 +731,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -691,6 +745,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -709,12 +764,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmin_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vminu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -729,6 +786,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -742,6 +800,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -760,12 +819,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmin_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vminu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -780,6 +841,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -793,6 +855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -811,12 +874,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmin_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vminu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -831,6 +896,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -844,6 +910,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vminu.vv v8, v8, v16 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb %vc = select %cmp, %va, %vb @@ -862,12 +929,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmin_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vminu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -882,6 +951,7 @@ ; CHECK-NEXT: addi a0, zero, -3 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vminu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -3, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -30,6 +32,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -42,6 +45,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -52,6 +56,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -65,6 +70,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -77,6 +83,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -87,6 +94,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -100,6 +108,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -112,6 +121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -122,6 +132,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, 
e8, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -135,6 +146,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -147,6 +159,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -157,6 +170,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -170,6 +184,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -182,6 +197,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -192,6 +208,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -205,6 +222,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -217,6 +235,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -227,6 +246,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -240,6 +260,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -252,6 +273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -262,6 +284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -275,6 +298,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -287,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, 
zero, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -297,6 +322,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -310,6 +336,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -322,6 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -332,6 +360,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -345,6 +374,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -357,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -367,6 +398,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -380,6 +412,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -392,6 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -402,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -415,6 +450,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -427,6 +463,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -437,6 +474,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -450,6 +488,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmul.vx 
v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -462,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -472,6 +512,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -485,6 +526,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -497,6 +539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -507,6 +550,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -520,6 +564,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -532,6 +577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -542,6 +588,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -555,6 +602,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -567,6 +615,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -577,6 +626,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -590,6 +640,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -602,6 +653,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -612,6 +664,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -625,6 +678,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -637,6 +691,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -654,12 +709,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -673,6 +730,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -685,6 +743,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -697,6 +756,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -709,6 +769,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -726,12 +787,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -745,6 +808,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -757,6 +821,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -769,6 +834,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -781,6 +847,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ 
-798,12 +865,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -817,6 +886,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -829,6 +899,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -841,6 +912,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -853,6 +925,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = mul %va, %vb ret %vc @@ -870,12 +943,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -889,6 +964,7 @@ ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -901,6 +977,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -913,6 +990,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -25,6 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vv 
v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -97,6 +104,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -109,6 +117,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -125,6 +134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -135,6 +145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -147,6 +158,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -175,6 +188,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -185,6 +199,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -197,6 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, 
i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -209,6 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -225,6 +242,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -235,6 +253,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -247,6 +266,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -259,6 +279,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -275,6 +296,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -285,6 +307,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -297,6 +320,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -309,6 +333,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -325,6 +350,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -335,6 +361,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -347,6 +374,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -359,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -375,6 +404,7 @@ ; CHECK: # %bb.0: 
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -385,6 +415,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -397,6 +428,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -409,6 +441,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -425,6 +458,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -435,6 +469,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -447,6 +482,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -459,6 +495,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -475,6 +512,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -485,6 +523,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -497,6 +536,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -509,6 +549,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -525,6 +566,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -535,6 +577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -547,6 +590,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -559,6 +603,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -575,6 +620,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -585,6 +631,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -597,6 +644,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -609,6 +657,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -625,6 +674,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -635,6 +685,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -647,6 +698,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -659,6 +711,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -675,6 +728,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -685,6 +739,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -697,6 +752,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 
%b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -709,6 +765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -725,6 +782,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -735,6 +793,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -747,6 +806,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -759,6 +819,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -775,6 +836,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -785,6 +847,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -797,6 +860,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -809,6 +873,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -825,6 +890,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv7i32( %va, %b, %m, i32 %evl) ret %v @@ -835,6 +901,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -847,6 +914,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -859,6 +927,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -875,6 
+944,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -885,6 +955,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -897,6 +968,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -909,6 +981,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -925,6 +998,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -935,6 +1009,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -947,6 +1022,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -959,6 +1035,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -975,6 +1052,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -985,6 +1063,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1005,12 +1084,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1031,12 +1112,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = 
shufflevector %elt.head, undef, zeroinitializer @@ -1053,6 +1136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1063,6 +1147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1083,12 +1168,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1109,12 +1196,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1131,6 +1220,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1141,6 +1231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1161,12 +1252,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1187,12 +1280,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1209,6 +1304,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.mul.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -1219,6 +1315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmul.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1239,12 +1336,14 @@ ; RV32-NEXT: 
vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1265,12 +1364,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: vsrl.vi v9, v9, 1 ; CHECK-NEXT: vor.vv v8, v9, v8 ; CHECK-NEXT: vmsleu.vx v0, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head_six = insertelement undef, i8 6, i32 0 %splat_six = shufflevector %head_six, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll @@ -12,6 +12,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vb %y = sub %vc, %x @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnmsub.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -36,6 +38,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vc %y = sub %vb, %x @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -60,6 +64,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %va %y = sub %vc, %x @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnmsub.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -84,6 +90,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vnmsac.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %vc %y = sub %va, %x @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -108,6 +116,7 @@ ; CHECK: 
# %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vc, %va %y = sub %vb, %x @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vnmsub.vx v8, a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -132,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vnmsac.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vc, %vb %y = sub %va, %x @@ -143,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vnmsac.vx v8, a0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -157,6 +169,7 @@ ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vnmsac.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vc, %vb %y = sub %va, %x @@ -168,6 +181,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vnmsac.vx v8, a0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -181,6 +195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vb %y = sub %vc, %x @@ -192,6 +207,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vnmsub.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -205,6 +221,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vc %y = sub %vb, %x @@ -216,6 +233,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -229,6 +247,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %va %y = sub %vc, %x @@ -240,6 +259,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vnmsub.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -253,6 +273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vnmsac.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %vc %y = sub %va, %x @@ -264,6 +285,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vnmsac.vx v8, a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -277,6 +299,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
%x = mul %vc, %va %y = sub %vb, %x @@ -288,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vnmsub.vx v8, a0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -302,6 +326,7 @@ ; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vnmsac.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vc, %vb %y = sub %va, %x @@ -313,6 +338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vnmsac.vx v8, a0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -326,6 +352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vb %y = sub %vc, %x @@ -337,6 +364,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vnmsub.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -350,6 +378,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v10, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vc %y = sub %vb, %x @@ -361,6 +390,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -374,6 +404,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v10, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %va %y = sub %vc, %x @@ -385,6 +416,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vnmsub.vx v8, a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -398,6 +430,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vnmsac.vv v8, v16, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %vc %y = sub %va, %x @@ -409,6 +442,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vnmsac.vx v8, a0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -423,6 +457,7 @@ ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v24, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vc, %va %y = sub %vb, %x @@ -434,6 +469,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vnmsub.vx v8, a0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -447,6 +483,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v9, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vb %y = sub %vc, %x @@ -465,12 +502,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vnmsub.vv v8, 
v10, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vnmsub_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vnmsub.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -484,6 +523,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v12, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %va, %vc %y = sub %vb, %x @@ -502,12 +542,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vnmsac.vv v8, v10, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vnmsub_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vnmsac.vx v8, a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -521,6 +563,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vnmsub.vv v8, v12, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %va %y = sub %vc, %x @@ -539,12 +582,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vnmsub.vv v8, v16, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vnmsub_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vnmsub.vx v8, a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -559,6 +604,7 @@ ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vnmsac.vv v8, v16, v24 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %x = mul %vb, %vc %y = sub %va, %x @@ -577,12 +623,14 @@ ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vnmsac.vv v8, v16, v24 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vnmsub_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vnmsac.vx v8, a0, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %c, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -31,6 +33,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -44,6 +47,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -56,6 +60,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -68,6 +73,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -80,6 +86,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -93,6 +100,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -105,6 +113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -117,6 +126,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -129,6 +139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -142,6 +153,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -154,6 +166,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -166,6 +179,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -178,6 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -191,6 +206,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -203,6 +219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -215,6 +232,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -240,6 +259,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -252,6 +272,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -264,6 +285,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -276,6 +298,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -289,6 +312,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -301,6 +325,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -313,6 +338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -325,6 +351,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -338,6 +365,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -350,6 +378,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -362,6 +391,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, i16 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -374,6 +404,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -387,6 +418,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -399,6 +431,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -411,6 +444,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -423,6 +457,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -436,6 +471,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -448,6 +484,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -460,6 +497,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -472,6 +510,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -485,6 +524,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -497,6 +537,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -509,6 +550,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -521,6 +563,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -534,6 +577,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -546,6 +590,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -558,6 +603,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -570,6 +616,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -583,6 +630,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -595,6 +643,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -607,6 +656,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -619,6 +669,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -632,6 +683,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -644,6 +696,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -656,6 +709,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -668,6 +722,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -681,6 +736,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
%head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -693,6 +749,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -705,6 +762,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -717,6 +775,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -730,6 +789,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -742,6 +802,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -754,6 +815,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -766,6 +828,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -779,6 +842,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -791,6 +855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -803,6 +868,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -815,6 +881,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -828,6 +895,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -840,6 +908,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -852,6 +921,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -864,6 +934,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -877,6 +948,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -896,12 +968,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -914,6 +988,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -926,6 +1001,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -939,6 +1015,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -958,12 +1035,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -976,6 +1055,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -988,6 +1068,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1001,6 +1082,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1020,12 +1102,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vor.vv v8, v8, 
v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1038,6 +1122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1050,6 +1135,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1063,6 +1149,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1082,12 +1169,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1100,6 +1189,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, -12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -12, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1112,6 +1202,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1125,6 +1216,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1138,6 +1230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv.v.i v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -25,6 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -73,6 +78,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -101,6 +108,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -111,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -123,6 +132,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -135,6 +145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -149,6 +160,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -161,6 +173,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -177,6 +190,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -187,6 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -199,6 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement 
undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -211,6 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -225,6 +242,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -237,6 +255,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -253,6 +272,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -263,6 +283,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -275,6 +296,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -287,6 +309,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -301,6 +324,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -313,6 +337,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -329,6 +354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -339,6 +365,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -351,6 +378,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -363,6 +391,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ 
-377,6 +406,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -389,6 +419,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -405,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -415,6 +447,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -427,6 +460,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -439,6 +473,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -453,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -465,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -481,6 +518,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -491,6 +529,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -503,6 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -515,6 +555,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -529,6 +570,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -541,6 +583,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, 
ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -557,6 +600,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -567,6 +611,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -579,6 +624,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -591,6 +637,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -605,6 +652,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -617,6 +665,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -633,6 +682,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -643,6 +693,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -655,6 +706,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -667,6 +719,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -681,6 +734,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -693,6 +747,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -709,6 +764,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -719,6 +775,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -731,6 +788,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -743,6 +801,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -757,6 +816,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -769,6 +829,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -785,6 +846,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -795,6 +857,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -807,6 +870,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -819,6 +883,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -833,6 +898,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -845,6 +911,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -861,6 +928,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -871,6 +939,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 
%m = shufflevector %head, undef, zeroinitializer @@ -883,6 +952,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -895,6 +965,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -909,6 +980,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -921,6 +993,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -937,6 +1010,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -947,6 +1021,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -959,6 +1034,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -971,6 +1047,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -985,6 +1062,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -997,6 +1075,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1013,6 +1092,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -1023,6 +1103,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1035,6 +1116,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, 
zeroinitializer @@ -1047,6 +1129,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1061,6 +1144,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1073,6 +1157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1089,6 +1174,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -1099,6 +1185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1111,6 +1198,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1123,6 +1211,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1137,6 +1226,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1149,6 +1239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1165,6 +1256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -1175,6 +1267,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1187,6 +1280,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1199,6 +1293,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1213,6 
+1308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1225,6 +1321,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1241,6 +1338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -1251,6 +1349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1263,6 +1362,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1275,6 +1375,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1289,6 +1390,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1301,6 +1403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1317,6 +1420,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv10i32( %va, %b, %m, i32 %evl) ret %v @@ -1327,6 +1431,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1339,6 +1444,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1351,6 +1457,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1365,6 +1472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1377,6 +1485,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1393,6 +1502,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -1403,6 +1513,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1415,6 +1526,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1427,6 +1539,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1441,6 +1554,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1453,6 +1567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1469,6 +1584,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -1479,6 +1595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1499,12 +1616,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vor.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1525,12 +1644,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1545,6 +1666,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 
%vb = shufflevector %elt.head, undef, zeroinitializer @@ -1557,6 +1679,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1573,6 +1696,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1583,6 +1707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1603,12 +1728,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vor.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1629,12 +1756,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1649,6 +1778,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1661,6 +1791,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1677,6 +1808,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1687,6 +1819,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1707,12 +1840,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vor.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1733,12 +1868,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: 
vor_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1753,6 +1890,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1765,6 +1903,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1781,6 +1920,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -1791,6 +1931,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1811,12 +1952,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1837,12 +1980,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1857,6 +2002,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1869,6 +2015,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vor.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll @@ -12,6 +12,7 @@ ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1i8: @@ -19,6 +20,7 @@ ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv1i8.nxv1p0i8( %ptrs, %m, i32 %evl) ret %v @@ 
-32,6 +34,7 @@ ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i8: @@ -39,6 +42,7 @@ ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) ret %v @@ -51,6 +55,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i16: @@ -59,6 +64,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vsext.vf2 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) %ev = sext %v to @@ -72,6 +78,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i16: @@ -80,6 +87,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vzext.vf2 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) %ev = zext %v to @@ -93,6 +101,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vsext.vf4 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i32: @@ -101,6 +110,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vsext.vf4 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) %ev = sext %v to @@ -114,6 +124,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vzext.vf4 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i32: @@ -122,6 +133,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vzext.vf4 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) %ev = zext %v to @@ -135,6 +147,7 @@ ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf8 v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i64: @@ -143,6 +156,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vsext.vf8 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) %ev = sext %v to @@ -156,6 +170,7 @@ ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf8 v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i64: @@ -164,6 +179,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vzext.vf8 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call 
@llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) %ev = zext %v to @@ -178,6 +194,7 @@ ; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4i8: @@ -185,6 +202,7 @@ ; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4i8.nxv4p0i8( %ptrs, %m, i32 %evl) ret %v @@ -196,6 +214,7 @@ ; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4i8: @@ -203,6 +222,7 @@ ; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -218,6 +238,7 @@ ; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8i8: @@ -225,6 +246,7 @@ ; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8i8.nxv8p0i8( %ptrs, %m, i32 %evl) ret %v @@ -237,6 +259,7 @@ ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i8: @@ -245,6 +268,7 @@ ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, %idxs %v = call @llvm.vp.gather.nxv8i8.nxv8p0i8( %ptrs, %m, i32 %evl) @@ -259,6 +283,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1i16: @@ -266,6 +291,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv1i16.nxv1p0i16( %ptrs, %m, i32 %evl) ret %v @@ -279,6 +305,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i16: @@ -286,6 +313,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) ret %v @@ -298,6 +326,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vsext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i16_sextload_nxv2i32: @@ -306,6 +335,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vsext.vf2 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret 
%v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) %ev = sext %v to @@ -319,6 +349,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vzext.vf2 v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i16_zextload_nxv2i32: @@ -327,6 +358,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vzext.vf2 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) %ev = zext %v to @@ -340,6 +372,7 @@ ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf4 v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i16_sextload_nxv2i64: @@ -348,6 +381,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vsext.vf4 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) %ev = sext %v to @@ -361,6 +395,7 @@ ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf4 v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i16_zextload_nxv2i64: @@ -369,6 +404,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vzext.vf4 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) %ev = zext %v to @@ -383,6 +419,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4i16: @@ -390,6 +427,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4i16.nxv4p0i16( %ptrs, %m, i32 %evl) ret %v @@ -401,6 +439,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4i16: @@ -408,6 +447,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -423,6 +463,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8i16: @@ -430,6 +471,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8i16.nxv8p0i16( %ptrs, %m, i32 %evl) ret %v @@ -443,6 +485,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i16: @@ -452,6 +495,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, 
(a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs %v = call @llvm.vp.gather.nxv8i16.nxv8p0i16( %ptrs, %m, i32 %evl) @@ -466,6 +510,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i16: @@ -475,6 +520,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i16, i16* %base, %eidxs @@ -490,6 +536,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i16: @@ -499,6 +546,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i16, i16* %base, %eidxs @@ -514,6 +562,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i16: @@ -523,6 +572,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs %v = call @llvm.vp.gather.nxv8i16.nxv8p0i16( %ptrs, %m, i32 %evl) @@ -536,6 +586,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1i32: @@ -543,6 +594,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv1i32.nxv1p0i32( %ptrs, %m, i32 %evl) ret %v @@ -555,6 +607,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i32: @@ -562,6 +615,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i32.nxv2p0i32( %ptrs, %m, i32 %evl) ret %v @@ -574,6 +628,7 @@ ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf2 v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i32_sextload_nxv2i64: @@ -582,6 +637,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vsext.vf2 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i32.nxv2p0i32( %ptrs, %m, i32 %evl) %ev = sext %v to @@ -595,6 +651,7 @@ ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf2 v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i32_zextload_nxv2i64: 
@@ -603,6 +660,7 @@ ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vzext.vf2 v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i32.nxv2p0i32( %ptrs, %m, i32 %evl) %ev = zext %v to @@ -616,6 +674,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4i32: @@ -623,6 +682,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4i32.nxv4p0i32( %ptrs, %m, i32 %evl) ret %v @@ -633,6 +693,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4i32: @@ -640,6 +701,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -654,6 +716,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8i32: @@ -661,6 +724,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) ret %v @@ -674,6 +738,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i32: @@ -683,6 +748,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) @@ -697,6 +763,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i32: @@ -706,6 +773,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -721,6 +789,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i32: @@ -730,6 +799,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -745,6 +815,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: 
ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8i32: @@ -754,6 +825,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) @@ -768,6 +840,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i32: @@ -777,6 +850,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -792,6 +866,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i32: @@ -801,6 +876,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -815,6 +891,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i32: @@ -824,6 +901,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) @@ -838,12 +916,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv1i64.nxv1p0i64( %ptrs, %m, i32 %evl) ret %v @@ -857,12 +937,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i64.nxv2p0i64( %ptrs, %m, i32 %evl) ret %v @@ -876,12 +958,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4i64.nxv4p0i64( %ptrs, %m, i32 %evl) ret %v @@ -893,12 +977,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8 ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; 
RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -914,12 +1000,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) ret %v @@ -933,6 +1021,7 @@ ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i64: @@ -942,6 +1031,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) @@ -956,6 +1046,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -971,6 +1062,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -986,6 +1078,7 @@ ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8i64: @@ -995,6 +1088,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) @@ -1009,6 +1103,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1024,6 +1119,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1038,6 +1134,7 @@ ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8i64: @@ -1047,6 +1144,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call 
@llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) @@ -1061,6 +1159,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1076,6 +1175,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1090,6 +1190,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call @llvm.vp.gather.nxv8i64.nxv8p0i64( %ptrs, %m, i32 %evl) @@ -1104,6 +1205,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1f16: @@ -1111,6 +1213,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv1f16.nxv1p0f16( %ptrs, %m, i32 %evl) ret %v @@ -1124,6 +1227,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2f16: @@ -1131,6 +1235,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2f16.nxv2p0f16( %ptrs, %m, i32 %evl) ret %v @@ -1144,6 +1249,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4f16: @@ -1151,6 +1257,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4f16.nxv4p0f16( %ptrs, %m, i32 %evl) ret %v @@ -1162,6 +1269,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4f16: @@ -1169,6 +1277,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1184,6 +1293,7 @@ ; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8f16: @@ -1191,6 +1301,7 @@ ; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8f16.nxv8p0f16( %ptrs, %m, i32 %evl) ret %v @@ -1204,6 +1315,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, 
m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f16: @@ -1213,6 +1325,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs %v = call @llvm.vp.gather.nxv8f16.nxv8p0f16( %ptrs, %m, i32 %evl) @@ -1227,6 +1340,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f16: @@ -1236,6 +1350,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds half, half* %base, %eidxs @@ -1251,6 +1366,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f16: @@ -1260,6 +1376,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds half, half* %base, %eidxs @@ -1275,6 +1392,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8f16: @@ -1284,6 +1402,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs %v = call @llvm.vp.gather.nxv8f16.nxv8p0f16( %ptrs, %m, i32 %evl) @@ -1297,6 +1416,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1f32: @@ -1304,6 +1424,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv1f32.nxv1p0f32( %ptrs, %m, i32 %evl) ret %v @@ -1316,6 +1437,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2f32: @@ -1323,6 +1445,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t ; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2f32.nxv2p0f32( %ptrs, %m, i32 %evl) ret %v @@ -1335,6 +1458,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4f32: @@ -1342,6 +1466,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4f32.nxv4p0f32( 
%ptrs, %m, i32 %evl) ret %v @@ -1352,6 +1477,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4f32: @@ -1359,6 +1485,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1373,6 +1500,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8f32: @@ -1380,6 +1508,7 @@ ; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) ret %v @@ -1393,6 +1522,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f32: @@ -1402,6 +1532,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) @@ -1416,6 +1547,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f32: @@ -1425,6 +1557,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1440,6 +1573,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f32: @@ -1449,6 +1583,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1464,6 +1599,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8f32: @@ -1473,6 +1609,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) @@ -1487,6 +1624,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f32: @@ -1496,6 +1634,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli 
zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1511,6 +1650,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f32: @@ -1520,6 +1660,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1534,6 +1675,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8f32: @@ -1543,6 +1685,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) @@ -1557,12 +1700,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv1f64.nxv1p0f64( %ptrs, %m, i32 %evl) ret %v @@ -1576,12 +1721,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t ; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2f64.nxv2p0f64( %ptrs, %m, i32 %evl) ret %v @@ -1595,12 +1742,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4f64.nxv4p0f64( %ptrs, %m, i32 %evl) ret %v @@ -1612,12 +1761,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8 ; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1633,12 +1784,14 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t ; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t +; RV64-NEXT: 
.cfi_def_cfa_offset 0 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) ret %v @@ -1652,6 +1805,7 @@ ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f64: @@ -1661,6 +1815,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) @@ -1675,6 +1830,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1690,6 +1846,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1705,6 +1862,7 @@ ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8f64: @@ -1714,6 +1872,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) @@ -1728,6 +1887,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1743,6 +1903,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1757,6 +1918,7 @@ ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8f64: @@ -1766,6 +1928,7 @@ ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) @@ -1780,6 +1943,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1795,6 +1959,7 @@ ; CHECK-NEXT: vsll.vi v8, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1809,6 
+1974,7 @@ ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei64.v v8, (a0), v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.vp.gather.nxv8f64.nxv8p0f64( %ptrs, %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv1i8(* %ptr, %m, i32 %evl) ret %load @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv2i8(* %ptr, %m, i32 %evl) ret %load @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv4i8(* %ptr, %m, i32 %evl) ret %load @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv8i8(* %ptr, %m, i32 %evl) ret %load @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv1i16(* %ptr, %m, i32 %evl) ret %load @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv2i16(* %ptr, %m, i32 %evl) ret %load @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv4i16(* %ptr, %m, i32 %evl) ret %load @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv8i16(* %ptr, %m, i32 %evl) ret %load @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv1i32(* %ptr, %m, i32 %evl) ret %load @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv2i32(* %ptr, %m, i32 %evl) ret %load @@ -131,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv4i32(* %ptr, %m, i32 %evl) ret %load @@ -143,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv8i32(* %ptr, %m, i32 %evl) ret %load @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv1i64(* %ptr, %m, i32 %evl) ret %load @@ -167,6 +180,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv2i64(* %ptr, %m, i32 %evl) ret %load @@ -179,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv4i64(* %ptr, %m, i32 %evl) ret %load @@ -191,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv8i64(* %ptr, %m, i32 %evl) ret %load @@ -203,6 +219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv1f16(* %ptr, %m, i32 %evl) ret %load @@ -215,6 +232,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv2f16(* %ptr, %m, i32 %evl) ret %load @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv4f16(* %ptr, %m, i32 %evl) ret %load @@ -239,6 +258,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv8f16(* %ptr, %m, i32 %evl) ret %load @@ -251,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv1f32(* %ptr, %m, i32 %evl) ret %load @@ -263,6 +284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv2f32(* %ptr, %m, i32 %evl) ret %load @@ -275,6 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv4f32(* %ptr, %m, i32 %evl) ret %load @@ -287,6 +310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv8f32(* %ptr, %m, i32 %evl) ret %load @@ -299,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv1f64(* %ptr, %m, i32 %evl) ret %load @@ -311,6 +336,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv2f64(* %ptr, %m, i32 %evl) ret %load @@ -323,6 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv4f64(* %ptr, %m, i32 %evl) ret %load @@ -335,6 
+362,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv8f64(* %ptr, %m, i32 %evl) ret %load diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll @@ -11,12 +11,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1i8.nxv1p0i8( %val, %ptrs, %m, i32 %evl) ret void @@ -29,12 +31,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2i8.nxv2p0i8( %val, %ptrs, %m, i32 %evl) ret void @@ -47,6 +51,7 @@ ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8: @@ -55,6 +60,7 @@ ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.vp.scatter.nxv2i8.nxv2p0i8( %tval, %ptrs, %m, i32 %evl) @@ -70,6 +76,7 @@ ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8: @@ -80,6 +87,7 @@ ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.vp.scatter.nxv2i8.nxv2p0i8( %tval, %ptrs, %m, i32 %evl) @@ -97,6 +105,7 @@ ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8: @@ -109,6 +118,7 @@ ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.vp.scatter.nxv2i8.nxv2p0i8( %tval, %ptrs, %m, i32 %evl) @@ -122,12 +132,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4i8.nxv4p0i8( %val, %ptrs, %m, i32 %evl) ret void @@ -138,12 +150,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV32-NEXT: 
vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -158,12 +172,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8i8.nxv8p0i8( %val, %ptrs, %m, i32 %evl) ret void @@ -176,6 +192,7 @@ ; RV32-NEXT: vsext.vf4 v12, v9 ; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8: @@ -184,6 +201,7 @@ ; RV64-NEXT: vsext.vf8 v16, v9 ; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, %idxs call void @llvm.vp.scatter.nxv8i8.nxv8p0i8( %val, %ptrs, %m, i32 %evl) @@ -197,12 +215,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1i16.nxv1p0i16( %val, %ptrs, %m, i32 %evl) ret void @@ -215,12 +235,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2i16.nxv2p0i16( %val, %ptrs, %m, i32 %evl) ret void @@ -233,6 +255,7 @@ ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16: @@ -241,6 +264,7 @@ ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.vp.scatter.nxv2i16.nxv2p0i16( %tval, %ptrs, %m, i32 %evl) @@ -256,6 +280,7 @@ ; RV32-NEXT: vnsrl.wi v8, v11, 0 ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16: @@ -266,6 +291,7 @@ ; RV64-NEXT: vnsrl.wi v8, v12, 0 ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.vp.scatter.nxv2i16.nxv2p0i16( %tval, %ptrs, %m, i32 %evl) @@ -279,12 +305,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; 
RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4i16.nxv4p0i16( %val, %ptrs, %m, i32 %evl) ret void @@ -295,12 +323,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -315,12 +345,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, %m, i32 %evl) ret void @@ -334,6 +366,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i16: @@ -343,6 +376,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs call void @llvm.vp.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, %m, i32 %evl) @@ -357,6 +391,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i16: @@ -366,6 +401,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i16, i16* %base, %eidxs @@ -381,6 +417,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i16: @@ -390,6 +427,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i16, i16* %base, %eidxs @@ -405,6 +443,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i16: @@ -414,6 +453,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs call void @llvm.vp.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, %m, i32 %evl) @@ -427,12 +467,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, 
mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1i32.nxv1p0i32( %val, %ptrs, %m, i32 %evl) ret void @@ -445,12 +487,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2i32.nxv2p0i32( %val, %ptrs, %m, i32 %evl) ret void @@ -463,6 +507,7 @@ ; RV32-NEXT: vnsrl.wi v11, v8, 0 ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32: @@ -471,6 +516,7 @@ ; RV64-NEXT: vnsrl.wi v12, v8, 0 ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %tval = trunc %val to call void @llvm.vp.scatter.nxv2i32.nxv2p0i32( %tval, %ptrs, %m, i32 %evl) @@ -484,12 +530,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4i32.nxv4p0i32( %val, %ptrs, %m, i32 %evl) ret void @@ -500,12 +548,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -520,12 +570,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) ret void @@ -539,6 +591,7 @@ ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i32: @@ -548,6 +601,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) @@ -562,6 +616,7 @@ ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: 
.cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i32: @@ -571,6 +626,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -586,6 +642,7 @@ ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i32: @@ -595,6 +652,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -610,6 +668,7 @@ ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i32: @@ -619,6 +678,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) @@ -633,6 +693,7 @@ ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i32: @@ -642,6 +703,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -657,6 +719,7 @@ ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i32: @@ -666,6 +729,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -680,6 +744,7 @@ ; RV32-NEXT: vsll.vi v12, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i32: @@ -689,6 +754,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) @@ -702,12 +768,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1i64.nxv1p0i64( %val, %ptrs, %m, i32 %evl) ret void @@ 
-720,12 +788,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2i64.nxv2p0i64( %val, %ptrs, %m, i32 %evl) ret void @@ -738,12 +808,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4i64.nxv4p0i64( %val, %ptrs, %m, i32 %evl) ret void @@ -754,12 +826,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -774,12 +848,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) ret void @@ -793,6 +869,7 @@ ; RV32-NEXT: vsll.vi v16, v20, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i64: @@ -802,6 +879,7 @@ ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) @@ -816,6 +894,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -831,6 +910,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -846,6 +926,7 @@ ; RV32-NEXT: vsll.vi v16, v20, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64: @@ -855,6 +936,7 @@ ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, 
%ptrs, %m, i32 %evl) @@ -869,6 +951,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -884,6 +967,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -898,6 +982,7 @@ ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8i64: @@ -907,6 +992,7 @@ ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) @@ -921,6 +1007,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -936,6 +1023,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -950,6 +1038,7 @@ ; CHECK-NEXT: vsll.vi v16, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) @@ -963,12 +1052,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1f16.nxv1p0f16( %val, %ptrs, %m, i32 %evl) ret void @@ -981,12 +1072,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2f16.nxv2p0f16( %val, %ptrs, %m, i32 %evl) ret void @@ -999,12 +1092,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4f16.nxv4p0f16( %val, %ptrs, %m, i32 %evl) ret void @@ -1015,12 +1110,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: 
vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1035,12 +1132,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8f16.nxv8p0f16( %val, %ptrs, %m, i32 %evl) ret void @@ -1054,6 +1153,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f16: @@ -1063,6 +1163,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs call void @llvm.vp.scatter.nxv8f16.nxv8p0f16( %val, %ptrs, %m, i32 %evl) @@ -1077,6 +1178,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f16: @@ -1086,6 +1188,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds half, half* %base, %eidxs @@ -1101,6 +1204,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f16: @@ -1110,6 +1214,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds half, half* %base, %eidxs @@ -1125,6 +1230,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8f16: @@ -1134,6 +1240,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs call void @llvm.vp.scatter.nxv8f16.nxv8p0f16( %val, %ptrs, %m, i32 %evl) @@ -1147,12 +1254,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1f32.nxv1p0f32( %val, %ptrs, %m, i32 %evl) ret void @@ -1165,12 
+1274,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2f32.nxv2p0f32( %val, %ptrs, %m, i32 %evl) ret void @@ -1183,12 +1294,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4f32.nxv4p0f32( %val, %ptrs, %m, i32 %evl) ret void @@ -1199,12 +1312,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1219,12 +1334,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8f32.nxv8p0f32( %val, %ptrs, %m, i32 %evl) ret void @@ -1238,6 +1355,7 @@ ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f32: @@ -1247,6 +1365,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs call void @llvm.vp.scatter.nxv8f32.nxv8p0f32( %val, %ptrs, %m, i32 %evl) @@ -1261,6 +1380,7 @@ ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f32: @@ -1270,6 +1390,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1285,6 +1406,7 @@ ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f32: @@ -1294,6 +1416,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1309,6 +1432,7 @@ ; RV32-NEXT: 
vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f32: @@ -1318,6 +1442,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs call void @llvm.vp.scatter.nxv8f32.nxv8p0f32( %val, %ptrs, %m, i32 %evl) @@ -1332,6 +1457,7 @@ ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f32: @@ -1341,6 +1467,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1356,6 +1483,7 @@ ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f32: @@ -1365,6 +1493,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1379,6 +1508,7 @@ ; RV32-NEXT: vsll.vi v12, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8f32: @@ -1388,6 +1518,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs call void @llvm.vp.scatter.nxv8f32.nxv8p0f32( %val, %ptrs, %m, i32 %evl) @@ -1401,12 +1532,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1f64.nxv1p0f64( %val, %ptrs, %m, i32 %evl) ret void @@ -1419,12 +1552,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2f64.nxv2p0f64( %val, %ptrs, %m, i32 %evl) ret void @@ -1437,12 +1572,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4f64.nxv4p0f64( 
%val, %ptrs, %m, i32 %evl) ret void @@ -1453,12 +1590,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1473,12 +1612,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) ret void @@ -1492,6 +1633,7 @@ ; RV32-NEXT: vsll.vi v16, v20, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f64: @@ -1501,6 +1643,7 @@ ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) @@ -1515,6 +1658,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1530,6 +1674,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1545,6 +1690,7 @@ ; RV32-NEXT: vsll.vi v16, v20, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64: @@ -1554,6 +1700,7 @@ ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) @@ -1568,6 +1715,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1583,6 +1731,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1597,6 +1746,7 @@ ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8f64: @@ 
-1606,6 +1756,7 @@ ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) @@ -1620,6 +1771,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1635,6 +1787,7 @@ ; CHECK-NEXT: vsll.vi v16, v24, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1649,6 +1802,7 @@ ; CHECK-NEXT: vsll.vi v16, v16, 3 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1i8( %val, * %ptr, %m, i32 %evl) ret void @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2i8( %val, * %ptr, %m, i32 %evl) ret void @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4i8( %val, * %ptr, %m, i32 %evl) ret void @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8i8( %val, * %ptr, %m, i32 %evl) ret void @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1i16( %val, * %ptr, %m, i32 %evl) ret void @@ -71,6 +76,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2i16( %val, * %ptr, %m, i32 %evl) ret void @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4i16( %val, * %ptr, %m, i32 %evl) ret void @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8i16( %val, * %ptr, %m, i32 %evl) ret void @@ -107,6 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1i32( %val, * %ptr, %m, i32 %evl) ret void @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2i32( %val, * %ptr, %m, i32 %evl) ret void @@ -131,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4i32( %val, * %ptr, %m, i32 %evl) ret void @@ -143,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8i32( %val, * %ptr, %m, i32 %evl) ret void @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1i64( %val, * %ptr, %m, i32 %evl) ret void @@ -167,6 +180,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2i64( %val, * %ptr, %m, i32 %evl) ret void @@ -179,6 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4i64( %val, * %ptr, %m, i32 %evl) ret void @@ -191,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8i64( %val, * %ptr, %m, i32 %evl) ret void @@ -203,6 +219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1f16( %val, * %ptr, %m, i32 %evl) ret void @@ -215,6 +232,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2f16( %val, * %ptr, %m, i32 %evl) ret void @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4f16( %val, * %ptr, %m, i32 %evl) ret void @@ -239,6 +258,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8f16( %val, * %ptr, %m, i32 %evl) ret void @@ -251,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1f32( %val, * %ptr, %m, i32 %evl) ret void @@ -263,6 +284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2f32( %val, * %ptr, %m, i32 %evl) ret void @@ -275,6 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4f32( %val, * 
%ptr, %m, i32 %evl) ret void @@ -287,6 +310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8f32( %val, * %ptr, %m, i32 %evl) ret void @@ -299,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1f64( %val, * %ptr, %m, i32 %evl) ret void @@ -311,6 +336,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2f64( %val, * %ptr, %m, i32 %evl) ret void @@ -323,6 +349,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4f64( %val, * %ptr, %m, i32 %evl) ret void @@ -335,6 +362,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8f64( %val, * %ptr, %m, i32 %evl) ret void @@ -345,6 +373,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = insertelement undef, i1 true, i32 0 %b = shufflevector %a, poison, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll @@ -17,6 +17,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.h fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call reassoc half @llvm.vector.reduce.fadd.nxv1f16(half %s, %v) ret half %red @@ -30,6 +31,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call half @llvm.vector.reduce.fadd.nxv1f16(half %s, %v) ret half %red @@ -48,6 +50,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.h fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call reassoc half @llvm.vector.reduce.fadd.nxv2f16(half %s, %v) ret half %red @@ -61,6 +64,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call half @llvm.vector.reduce.fadd.nxv2f16(half %s, %v) ret half %red @@ -78,6 +82,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.h fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call reassoc half @llvm.vector.reduce.fadd.nxv4f16(half %s, %v) ret half %red @@ -90,6 +95,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call half @llvm.vector.reduce.fadd.nxv4f16(half %s, %v) ret half %red @@ -108,6 +114,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call reassoc float 
@llvm.vector.reduce.fadd.nxv1f32(float %s, %v) ret float %red @@ -121,6 +128,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call float @llvm.vector.reduce.fadd.nxv1f32(float %s, %v) ret float %red @@ -138,6 +146,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call reassoc float @llvm.vector.reduce.fadd.nxv2f32(float %s, %v) ret float %red @@ -150,6 +159,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call float @llvm.vector.reduce.fadd.nxv2f32(float %s, %v) ret float %red @@ -168,6 +178,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float %s, %v) ret float %red @@ -181,6 +192,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call float @llvm.vector.reduce.fadd.nxv4f32(float %s, %v) ret float %red @@ -198,6 +210,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.d fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call reassoc double @llvm.vector.reduce.fadd.nxv1f64(double %s, %v) ret double %red @@ -210,6 +223,7 @@ ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call double @llvm.vector.reduce.fadd.nxv1f64(double %s, %v) ret double %red @@ -228,6 +242,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.d fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call reassoc double @llvm.vector.reduce.fadd.nxv2f64(double %s, %v) ret double %red @@ -241,6 +256,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call double @llvm.vector.reduce.fadd.nxv2f64(double %s, %v) ret double %red @@ -259,6 +275,7 @@ ; CHECK-NEXT: vfredusum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.d fa0, fa0, ft0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call reassoc double @llvm.vector.reduce.fadd.nxv4f64(double %s, %v) ret double %red @@ -272,6 +289,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call double @llvm.vector.reduce.fadd.nxv4f64(double %s, %v) ret double %red @@ -289,6 +307,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call half @llvm.vector.reduce.fmin.nxv1f16( %v) ret half %red @@ -304,6 +323,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan half @llvm.vector.reduce.fmin.nxv1f16( %v) ret half %red @@ 
-319,6 +339,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan ninf half @llvm.vector.reduce.fmin.nxv1f16( %v) ret half %red @@ -336,6 +357,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call half @llvm.vector.reduce.fmin.nxv2f16( %v) ret half %red @@ -352,6 +374,7 @@ ; CHECK-NEXT: vfmv.v.f v9, ft0 ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call half @llvm.vector.reduce.fmin.nxv4f16( %v) ret half %red @@ -371,6 +394,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call half @llvm.vector.reduce.fmin.nxv64f16( %v) ret half %red @@ -388,6 +412,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call float @llvm.vector.reduce.fmin.nxv1f32( %v) ret float %red @@ -403,6 +428,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan float @llvm.vector.reduce.fmin.nxv1f32( %v) ret float %red @@ -418,6 +444,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan ninf float @llvm.vector.reduce.fmin.nxv1f32( %v) ret float %red @@ -434,6 +461,7 @@ ; CHECK-NEXT: vfmv.v.f v9, ft0 ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call float @llvm.vector.reduce.fmin.nxv2f32( %v) ret float %red @@ -451,6 +479,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call float @llvm.vector.reduce.fmin.nxv4f32( %v) ret float %red @@ -470,6 +499,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call float @llvm.vector.reduce.fmin.nxv32f32( %v) ret float %red @@ -486,6 +516,7 @@ ; CHECK-NEXT: vfmv.v.f v9, ft0 ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call double @llvm.vector.reduce.fmin.nxv1f64( %v) ret double %red @@ -500,6 +531,7 @@ ; CHECK-NEXT: vfmv.v.f v9, ft0 ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan double @llvm.vector.reduce.fmin.nxv1f64( %v) ret double %red @@ -514,6 +546,7 @@ ; CHECK-NEXT: vfmv.v.f v9, ft0 ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan ninf double @llvm.vector.reduce.fmin.nxv1f64( %v) ret double %red @@ -531,6 +564,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %red = call double @llvm.vector.reduce.fmin.nxv2f64( %v) ret double %red @@ -548,6 +582,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call double @llvm.vector.reduce.fmin.nxv4f64( %v) ret double %red @@ -567,6 +602,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call double @llvm.vector.reduce.fmin.nxv16f64( %v) ret double %red @@ -584,6 +620,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call half @llvm.vector.reduce.fmax.nxv1f16( %v) ret half %red @@ -599,6 +636,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan half @llvm.vector.reduce.fmax.nxv1f16( %v) ret half %red @@ -614,6 +652,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan ninf half @llvm.vector.reduce.fmax.nxv1f16( %v) ret half %red @@ -631,6 +670,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call half @llvm.vector.reduce.fmax.nxv2f16( %v) ret half %red @@ -647,6 +687,7 @@ ; CHECK-NEXT: vfmv.v.f v9, ft0 ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call half @llvm.vector.reduce.fmax.nxv4f16( %v) ret half %red @@ -666,6 +707,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call half @llvm.vector.reduce.fmax.nxv64f16( %v) ret half %red @@ -683,6 +725,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call float @llvm.vector.reduce.fmax.nxv1f32( %v) ret float %red @@ -698,6 +741,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan float @llvm.vector.reduce.fmax.nxv1f32( %v) ret float %red @@ -713,6 +757,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan ninf float @llvm.vector.reduce.fmax.nxv1f32( %v) ret float %red @@ -729,6 +774,7 @@ ; CHECK-NEXT: vfmv.v.f v9, ft0 ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call float @llvm.vector.reduce.fmax.nxv2f32( %v) ret float %red @@ -746,6 +792,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call float @llvm.vector.reduce.fmax.nxv4f32( %v) ret float %red @@ -765,6 +812,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; 
CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call float @llvm.vector.reduce.fmax.nxv32f32( %v) ret float %red @@ -781,6 +829,7 @@ ; CHECK-NEXT: vfmv.v.f v9, ft0 ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call double @llvm.vector.reduce.fmax.nxv1f64( %v) ret double %red @@ -795,6 +844,7 @@ ; CHECK-NEXT: vfmv.v.f v9, ft0 ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan double @llvm.vector.reduce.fmax.nxv1f64( %v) ret double %red @@ -809,6 +859,7 @@ ; CHECK-NEXT: vfmv.v.f v9, ft0 ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call nnan ninf double @llvm.vector.reduce.fmax.nxv1f64( %v) ret double %red @@ -826,6 +877,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call double @llvm.vector.reduce.fmax.nxv2f64( %v) ret double %red @@ -843,6 +895,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call double @llvm.vector.reduce.fmax.nxv4f64( %v) ret double %red @@ -862,6 +915,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call double @llvm.vector.reduce.fmax.nxv16f64( %v) ret double %red diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc half @llvm.vp.reduce.fadd.nxv1f16(half %s, %v, %m, i32 %evl) ret half %r @@ -27,6 +28,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call half @llvm.vp.reduce.fadd.nxv1f16(half %s, %v, %m, i32 %evl) ret half %r @@ -42,6 +44,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc half @llvm.vp.reduce.fadd.nxv2f16(half %s, %v, %m, i32 %evl) ret half %r @@ -55,6 +58,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call half @llvm.vp.reduce.fadd.nxv2f16(half %s, %v, %m, i32 %evl) ret half %r @@ -70,6 +74,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc half @llvm.vp.reduce.fadd.nxv4f16(half %s, %v, %m, i32 %evl) ret half %r @@ -83,6 +88,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call half @llvm.vp.reduce.fadd.nxv4f16(half %s, %v, %m, i32 %evl) ret half %r @@ -98,6 +104,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc float @llvm.vp.reduce.fadd.nxv1f32(float %s, %v, %m, i32 %evl) ret float %r @@ -111,6 +118,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call float @llvm.vp.reduce.fadd.nxv1f32(float %s, %v, %m, i32 %evl) ret float %r @@ -126,6 +134,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc float @llvm.vp.reduce.fadd.nxv2f32(float %s, %v, %m, i32 %evl) ret float %r @@ -139,6 +148,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call float @llvm.vp.reduce.fadd.nxv2f32(float %s, %v, %m, i32 %evl) ret float %r @@ -154,6 +164,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float %s, %v, %m, i32 %evl) ret float %r @@ -167,6 +178,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call float @llvm.vp.reduce.fadd.nxv4f32(float %s, %v, %m, i32 %evl) ret float %r @@ -182,6 +194,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc double @llvm.vp.reduce.fadd.nxv1f64(double %s, %v, %m, i32 %evl) ret double %r @@ -195,6 +208,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call double @llvm.vp.reduce.fadd.nxv1f64(double %s, %v, %m, i32 %evl) ret double %r @@ -210,6 +224,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc double @llvm.vp.reduce.fadd.nxv2f64(double %s, %v, %m, i32 %evl) ret double %r @@ -223,6 +238,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call double @llvm.vp.reduce.fadd.nxv2f64(double %s, %v, %m, i32 %evl) ret double %r @@ -238,6 +254,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu ; CHECK-NEXT: vfredusum.vs v12, v8, v12, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call reassoc double @llvm.vp.reduce.fadd.nxv4f64(double %s, %v, %m, i32 %evl) ret double %r @@ -251,6 +268,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu ; CHECK-NEXT: vfredosum.vs v12, v8, v12, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call double 
@llvm.vp.reduce.fadd.nxv4f64(double %s, %v, %m, i32 %evl) ret double %r diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.add.nxv1i8( %v) ret i8 %red @@ -26,6 +27,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umax.nxv1i8( %v) ret i8 %red @@ -42,6 +44,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smax.nxv1i8( %v) ret i8 %red @@ -57,6 +60,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umin.nxv1i8( %v) ret i8 %red @@ -73,6 +77,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smin.nxv1i8( %v) ret i8 %red @@ -88,6 +93,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.and.nxv1i8( %v) ret i8 %red @@ -103,6 +109,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.or.nxv1i8( %v) ret i8 %red @@ -118,6 +125,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.xor.nxv1i8( %v) ret i8 %red @@ -133,6 +141,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.add.nxv2i8( %v) ret i8 %red @@ -148,6 +157,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umax.nxv2i8( %v) ret i8 %red @@ -164,6 +174,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smax.nxv2i8( %v) ret i8 %red @@ -179,6 +190,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umin.nxv2i8( %v) ret i8 %red @@ -195,6 +207,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smin.nxv2i8( 
%v) ret i8 %red @@ -210,6 +223,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.and.nxv2i8( %v) ret i8 %red @@ -225,6 +239,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.or.nxv2i8( %v) ret i8 %red @@ -240,6 +255,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.xor.nxv2i8( %v) ret i8 %red @@ -255,6 +271,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.add.nxv4i8( %v) ret i8 %red @@ -270,6 +287,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umax.nxv4i8( %v) ret i8 %red @@ -286,6 +304,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smax.nxv4i8( %v) ret i8 %red @@ -301,6 +320,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umin.nxv4i8( %v) ret i8 %red @@ -317,6 +337,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smin.nxv4i8( %v) ret i8 %red @@ -332,6 +353,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.and.nxv4i8( %v) ret i8 %red @@ -347,6 +369,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.or.nxv4i8( %v) ret i8 %red @@ -362,6 +385,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.xor.nxv4i8( %v) ret i8 %red @@ -377,6 +401,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.add.nxv1i16( %v) ret i16 %red @@ -392,6 +417,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.umax.nxv1i16( %v) ret i16 %red @@ -408,6 +434,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smax.nxv1i16( %v) ret i16 %red @@ 
-423,6 +450,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.umin.nxv1i16( %v) ret i16 %red @@ -440,6 +468,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smin.nxv1i16( %v) ret i16 %red @@ -455,6 +484,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.and.nxv1i16( %v) ret i16 %red @@ -470,6 +500,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.or.nxv1i16( %v) ret i16 %red @@ -485,6 +516,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.xor.nxv1i16( %v) ret i16 %red @@ -500,6 +532,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.add.nxv2i16( %v) ret i16 %red @@ -515,6 +548,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.umax.nxv2i16( %v) ret i16 %red @@ -531,6 +565,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smax.nxv2i16( %v) ret i16 %red @@ -546,6 +581,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.umin.nxv2i16( %v) ret i16 %red @@ -563,6 +599,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smin.nxv2i16( %v) ret i16 %red @@ -578,6 +615,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.and.nxv2i16( %v) ret i16 %red @@ -593,6 +631,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.or.nxv2i16( %v) ret i16 %red @@ -608,6 +647,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.xor.nxv2i16( %v) ret i16 %red @@ -622,6 +662,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.add.nxv4i16( %v) ret i16 %red @@ 
-636,6 +677,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.umax.nxv4i16( %v) ret i16 %red @@ -651,6 +693,7 @@ ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smax.nxv4i16( %v) ret i16 %red @@ -665,6 +708,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.umin.nxv4i16( %v) ret i16 %red @@ -681,6 +725,7 @@ ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smin.nxv4i16( %v) ret i16 %red @@ -695,6 +740,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.and.nxv4i16( %v) ret i16 %red @@ -709,6 +755,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.or.nxv4i16( %v) ret i16 %red @@ -723,6 +770,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.xor.nxv4i16( %v) ret i16 %red @@ -738,6 +786,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.add.nxv1i32( %v) ret i32 %red @@ -753,6 +802,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umax.nxv1i32( %v) ret i32 %red @@ -769,6 +819,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smax.nxv1i32( %v) ret i32 %red @@ -784,6 +835,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umin.nxv1i32( %v) ret i32 %red @@ -801,6 +853,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smin.nxv1i32( %v) ret i32 %red @@ -816,6 +869,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.and.nxv1i32( %v) ret i32 %red @@ -831,6 +885,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.or.nxv1i32( %v) ret i32 %red @@ -846,6 +901,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: 
vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.xor.nxv1i32( %v) ret i32 %red @@ -860,6 +916,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.add.nxv2i32( %v) ret i32 %red @@ -874,6 +931,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umax.nxv2i32( %v) ret i32 %red @@ -889,6 +947,7 @@ ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smax.nxv2i32( %v) ret i32 %red @@ -903,6 +962,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umin.nxv2i32( %v) ret i32 %red @@ -919,6 +979,7 @@ ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smin.nxv2i32( %v) ret i32 %red @@ -933,6 +994,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.and.nxv2i32( %v) ret i32 %red @@ -947,6 +1009,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.or.nxv2i32( %v) ret i32 %red @@ -961,6 +1024,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.xor.nxv2i32( %v) ret i32 %red @@ -976,6 +1040,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.add.nxv4i32( %v) ret i32 %red @@ -991,6 +1056,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umax.nxv4i32( %v) ret i32 %red @@ -1007,6 +1073,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smax.nxv4i32( %v) ret i32 %red @@ -1022,6 +1089,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umin.nxv4i32( %v) ret i32 %red @@ -1039,6 +1107,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smin.nxv4i32( %v) ret i32 %red @@ -1054,6 +1123,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.and.nxv4i32( %v) ret 
i32 %red @@ -1069,6 +1139,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.or.nxv4i32( %v) ret i32 %red @@ -1084,6 +1155,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.xor.nxv4i32( %v) ret i32 %red @@ -1102,6 +1174,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.add.nxv1i64( %v) ret i64 %red @@ -1120,6 +1193,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umax.nxv1i64( %v) ret i64 %red @@ -1145,6 +1219,7 @@ ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smax.nxv1i64( %v) ret i64 %red @@ -1163,6 +1238,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umin.nxv1i64( %v) ret i64 %red @@ -1190,6 +1266,7 @@ ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smin.nxv1i64( %v) ret i64 %red @@ -1208,6 +1285,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.and.nxv1i64( %v) ret i64 %red @@ -1226,6 +1304,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.or.nxv1i64( %v) ret i64 %red @@ -1244,6 +1323,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.xor.nxv1i64( %v) ret i64 %red @@ -1263,6 +1343,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.add.nxv2i64( %v) ret i64 %red @@ -1282,6 +1363,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umax.nxv2i64( %v) ret i64 %red @@ -1308,6 +1390,7 @@ ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smax.nxv2i64( %v) ret i64 %red @@ -1327,6 +1410,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umin.nxv2i64( %v) ret i64 %red @@ -1355,6 +1439,7 @@ ; CHECK-NEXT: vsrl.vx 
v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smin.nxv2i64( %v) ret i64 %red @@ -1374,6 +1459,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.and.nxv2i64( %v) ret i64 %red @@ -1393,6 +1479,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.or.nxv2i64( %v) ret i64 %red @@ -1412,6 +1499,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.xor.nxv2i64( %v) ret i64 %red @@ -1431,6 +1519,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.add.nxv4i64( %v) ret i64 %red @@ -1450,6 +1539,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umax.nxv4i64( %v) ret i64 %red @@ -1476,6 +1566,7 @@ ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smax.nxv4i64( %v) ret i64 %red @@ -1495,6 +1586,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umin.nxv4i64( %v) ret i64 %red @@ -1523,6 +1615,7 @@ ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smin.nxv4i64( %v) ret i64 %red @@ -1542,6 +1635,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.and.nxv4i64( %v) ret i64 %red @@ -1561,6 +1655,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.or.nxv4i64( %v) ret i64 %red @@ -1580,6 +1675,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a1, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.xor.nxv4i64( %v) ret i64 %red diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.add.nxv1i8( %v) ret i8 %red @@ -26,6 +27,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, 
v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umax.nxv1i8( %v) ret i8 %red @@ -42,6 +44,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smax.nxv1i8( %v) ret i8 %red @@ -57,6 +60,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umin.nxv1i8( %v) ret i8 %red @@ -73,6 +77,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smin.nxv1i8( %v) ret i8 %red @@ -88,6 +93,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.and.nxv1i8( %v) ret i8 %red @@ -103,6 +109,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.or.nxv1i8( %v) ret i8 %red @@ -118,6 +125,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.xor.nxv1i8( %v) ret i8 %red @@ -133,6 +141,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.add.nxv2i8( %v) ret i8 %red @@ -148,6 +157,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umax.nxv2i8( %v) ret i8 %red @@ -164,6 +174,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smax.nxv2i8( %v) ret i8 %red @@ -179,6 +190,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umin.nxv2i8( %v) ret i8 %red @@ -195,6 +207,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smin.nxv2i8( %v) ret i8 %red @@ -210,6 +223,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.and.nxv2i8( %v) ret i8 %red @@ -225,6 +239,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.or.nxv2i8( %v) ret i8 %red @@ -240,6 +255,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.xor.nxv2i8( %v) ret i8 %red @@ -255,6 +271,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.add.nxv4i8( %v) ret i8 %red @@ -270,6 +287,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umax.nxv4i8( %v) ret i8 %red @@ -286,6 +304,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smax.nxv4i8( %v) ret i8 %red @@ -301,6 +320,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.umin.nxv4i8( %v) ret i8 %red @@ -317,6 +337,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.smin.nxv4i8( %v) ret i8 %red @@ -332,6 +353,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.and.nxv4i8( %v) ret i8 %red @@ -347,6 +369,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.or.nxv4i8( %v) ret i8 %red @@ -362,6 +385,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i8 @llvm.vector.reduce.xor.nxv4i8( %v) ret i8 %red @@ -377,6 +401,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.add.nxv1i16( %v) ret i16 %red @@ -392,6 +417,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.umax.nxv1i16( %v) ret i16 %red @@ -408,6 +434,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smax.nxv1i16( %v) ret i16 %red @@ -423,6 +450,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.umin.nxv1i16( %v) ret i16 %red @@ -440,6 +468,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smin.nxv1i16( %v) ret i16 %red @@ -455,6 +484,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.and.nxv1i16( %v) ret i16 %red @@ -470,6 +500,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.or.nxv1i16( %v) ret i16 %red @@ -485,6 +516,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.xor.nxv1i16( %v) ret i16 %red @@ -500,6 +532,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.add.nxv2i16( %v) ret i16 %red @@ -515,6 +548,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.umax.nxv2i16( %v) ret i16 %red @@ -531,6 +565,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smax.nxv2i16( %v) ret i16 %red @@ -546,6 +581,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.umin.nxv2i16( %v) ret i16 %red @@ -563,6 +599,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smin.nxv2i16( %v) ret i16 %red @@ -578,6 +615,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.and.nxv2i16( %v) ret i16 %red @@ -593,6 +631,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.or.nxv2i16( %v) ret i16 %red @@ -608,6 +647,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.xor.nxv2i16( %v) ret i16 %red @@ -622,6 +662,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.add.nxv4i16( %v) ret i16 %red @@ -636,6 +677,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.umax.nxv4i16( %v) ret i16 %red @@ -651,6 +693,7 @@ ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smax.nxv4i16( %v) ret i16 %red @@ -665,6 +708,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 
@llvm.vector.reduce.umin.nxv4i16( %v) ret i16 %red @@ -681,6 +725,7 @@ ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.smin.nxv4i16( %v) ret i16 %red @@ -695,6 +740,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.and.nxv4i16( %v) ret i16 %red @@ -709,6 +755,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.or.nxv4i16( %v) ret i16 %red @@ -723,6 +770,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i16 @llvm.vector.reduce.xor.nxv4i16( %v) ret i16 %red @@ -738,6 +786,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.add.nxv1i32( %v) ret i32 %red @@ -753,6 +802,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umax.nxv1i32( %v) ret i32 %red @@ -769,6 +819,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smax.nxv1i32( %v) ret i32 %red @@ -784,6 +835,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umin.nxv1i32( %v) ret i32 %red @@ -801,6 +853,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smin.nxv1i32( %v) ret i32 %red @@ -816,6 +869,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.and.nxv1i32( %v) ret i32 %red @@ -831,6 +885,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.or.nxv1i32( %v) ret i32 %red @@ -846,6 +901,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.xor.nxv1i32( %v) ret i32 %red @@ -860,6 +916,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.add.nxv2i32( %v) ret i32 %red @@ -874,6 +931,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umax.nxv2i32( %v) ret i32 %red @@ -889,6 +947,7 @@ ; CHECK-NEXT: vmv.v.x v9, a0 ; 
CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smax.nxv2i32( %v) ret i32 %red @@ -903,6 +962,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umin.nxv2i32( %v) ret i32 %red @@ -919,6 +979,7 @@ ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smin.nxv2i32( %v) ret i32 %red @@ -933,6 +994,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.and.nxv2i32( %v) ret i32 %red @@ -947,6 +1009,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.or.nxv2i32( %v) ret i32 %red @@ -961,6 +1024,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.xor.nxv2i32( %v) ret i32 %red @@ -976,6 +1040,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.add.nxv4i32( %v) ret i32 %red @@ -991,6 +1056,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umax.nxv4i32( %v) ret i32 %red @@ -1007,6 +1073,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smax.nxv4i32( %v) ret i32 %red @@ -1022,6 +1089,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.umin.nxv4i32( %v) ret i32 %red @@ -1039,6 +1107,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.smin.nxv4i32( %v) ret i32 %red @@ -1054,6 +1123,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.and.nxv4i32( %v) ret i32 %red @@ -1069,6 +1139,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.or.nxv4i32( %v) ret i32 %red @@ -1084,6 +1155,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i32 @llvm.vector.reduce.xor.nxv4i32( %v) ret i32 %red @@ -1098,6 +1170,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.add.nxv1i64( %v) ret i64 %red @@ -1112,6 +1185,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umax.nxv1i64( %v) ret i64 %red @@ -1128,6 +1202,7 @@ ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smax.nxv1i64( %v) ret i64 %red @@ -1142,6 +1217,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umin.nxv1i64( %v) ret i64 %red @@ -1158,6 +1234,7 @@ ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smin.nxv1i64( %v) ret i64 %red @@ -1172,6 +1249,7 @@ ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.and.nxv1i64( %v) ret i64 %red @@ -1186,6 +1264,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.or.nxv1i64( %v) ret i64 %red @@ -1200,6 +1279,7 @@ ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.xor.nxv1i64( %v) ret i64 %red @@ -1215,6 +1295,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.add.nxv2i64( %v) ret i64 %red @@ -1230,6 +1311,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umax.nxv2i64( %v) ret i64 %red @@ -1247,6 +1329,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smax.nxv2i64( %v) ret i64 %red @@ -1262,6 +1345,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umin.nxv2i64( %v) ret i64 %red @@ -1279,6 +1363,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smin.nxv2i64( %v) ret i64 %red @@ -1294,6 +1379,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.and.nxv2i64( %v) ret i64 %red @@ -1309,6 +1395,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.or.nxv2i64( 
%v) ret i64 %red @@ -1324,6 +1411,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.xor.nxv2i64( %v) ret i64 %red @@ -1339,6 +1427,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.add.nxv4i64( %v) ret i64 %red @@ -1354,6 +1443,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umax.nxv4i64( %v) ret i64 %red @@ -1371,6 +1461,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smax.nxv4i64( %v) ret i64 %red @@ -1386,6 +1477,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.umin.nxv4i64( %v) ret i64 %red @@ -1403,6 +1495,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.smin.nxv4i64( %v) ret i64 %red @@ -1418,6 +1511,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.and.nxv4i64( %v) ret i64 %red @@ -1433,6 +1527,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.or.nxv4i64( %v) ret i64 %red @@ -1448,6 +1543,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i64 @llvm.vector.reduce.xor.nxv4i64( %v) ret i64 %red diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.add.nxv1i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -30,6 +31,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.umax.nxv1i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -45,6 +47,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.smax.nxv1i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -61,6 +64,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %r = call i8 @llvm.vp.reduce.umin.nxv1i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -76,6 +80,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.smin.nxv1i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -91,6 +96,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.and.nxv1i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -106,6 +112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.or.nxv1i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -121,6 +128,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.xor.nxv1i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -136,6 +144,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.add.nxv2i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -152,6 +161,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.umax.nxv2i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -167,6 +177,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.smax.nxv2i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -183,6 +194,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.umin.nxv2i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -198,6 +210,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.smin.nxv2i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -213,6 +226,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.and.nxv2i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -228,6 +242,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.or.nxv2i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -243,6 +258,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.xor.nxv2i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -258,6 +274,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.add.nxv4i8(i8 %s, %v, 
%m, i32 %evl) ret i8 %r @@ -274,6 +291,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.umax.nxv4i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -289,6 +307,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.smax.nxv4i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -305,6 +324,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.umin.nxv4i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -320,6 +340,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.smin.nxv4i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -335,6 +356,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.and.nxv4i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -350,6 +372,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.or.nxv4i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -365,6 +388,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i8 @llvm.vp.reduce.xor.nxv4i8(i8 %s, %v, %m, i32 %evl) ret i8 %r @@ -380,6 +404,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.add.nxv1i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -398,6 +423,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_nxv1i16: @@ -410,6 +436,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i16 @llvm.vp.reduce.umax.nxv1i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -425,6 +452,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.smax.nxv1i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -443,6 +471,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_nxv1i16: @@ -455,6 +484,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i16 @llvm.vp.reduce.umin.nxv1i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -470,6 +500,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: 
vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.smin.nxv1i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -485,6 +516,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.and.nxv1i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -500,6 +532,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.or.nxv1i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -515,6 +548,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.xor.nxv1i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -530,6 +564,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.add.nxv2i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -548,6 +583,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_nxv2i16: @@ -560,6 +596,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i16 @llvm.vp.reduce.umax.nxv2i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -575,6 +612,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.smax.nxv2i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -593,6 +631,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_nxv2i16: @@ -605,6 +644,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i16 @llvm.vp.reduce.umin.nxv2i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -620,6 +660,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.smin.nxv2i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -635,6 +676,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.and.nxv2i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -650,6 +692,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.or.nxv2i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -665,6 +708,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.xor.nxv2i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -680,6 +724,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.add.nxv4i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -698,6 +743,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_nxv4i16: @@ -710,6 +756,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i16 @llvm.vp.reduce.umax.nxv4i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -725,6 +772,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.smax.nxv4i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -743,6 +791,7 @@ ; RV32-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_nxv4i16: @@ -755,6 +804,7 @@ ; RV64-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i16 @llvm.vp.reduce.umin.nxv4i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -770,6 +820,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.smin.nxv4i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -785,6 +836,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.and.nxv4i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -800,6 +852,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.or.nxv4i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -815,6 +868,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i16 @llvm.vp.reduce.xor.nxv4i16(i16 %s, %v, %m, i32 %evl) ret i16 %r @@ -830,6 +884,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.add.nxv1i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -845,6 +900,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_nxv1i32: @@ -856,6 +912,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i32 @llvm.vp.reduce.umax.nxv1i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -871,6 +928,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.smax.nxv1i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -886,6 +944,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_nxv1i32: @@ -897,6 +956,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i32 @llvm.vp.reduce.umin.nxv1i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -912,6 +972,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.smin.nxv1i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -927,6 +988,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.and.nxv1i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -942,6 +1004,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.or.nxv1i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -957,6 +1020,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.xor.nxv1i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -972,6 +1036,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.add.nxv2i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -987,6 +1052,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_nxv2i32: @@ -998,6 +1064,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i32 @llvm.vp.reduce.umax.nxv2i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1013,6 +1080,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.smax.nxv2i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1028,6 +1096,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_nxv2i32: @@ -1039,6 +1108,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i32 @llvm.vp.reduce.umin.nxv2i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1054,6 +1124,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.smin.nxv2i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1069,6 +1140,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.and.nxv2i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1084,6 +1156,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.or.nxv2i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1099,6 +1172,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.xor.nxv2i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1114,6 +1188,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredsum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.add.nxv4i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1129,6 +1204,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_nxv4i32: @@ -1140,6 +1216,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i32 @llvm.vp.reduce.umax.nxv4i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1155,6 +1232,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredmax.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.smax.nxv4i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1170,6 +1248,7 @@ ; RV32-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_nxv4i32: @@ -1181,6 +1260,7 @@ ; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i32 @llvm.vp.reduce.umin.nxv4i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1196,6 +1276,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredmin.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.smin.nxv4i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1211,6 +1292,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredand.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.and.nxv4i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1226,6 +1308,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredor.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.or.nxv4i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1241,6 +1324,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredxor.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %r = call i32 @llvm.vp.reduce.xor.nxv4i32(i32 %s, %v, %m, i32 %evl) ret i32 %r @@ -1266,6 +1350,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_add_nxv1i64: @@ -1275,6 +1360,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.add.nxv1i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1300,6 +1386,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_nxv1i64: @@ -1309,6 +1396,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.umax.nxv1i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1334,6 +1422,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_smax_nxv1i64: @@ -1343,6 +1432,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.smax.nxv1i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1368,6 +1458,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_nxv1i64: @@ -1377,6 +1468,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.umin.nxv1i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1402,6 +1494,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_smin_nxv1i64: @@ -1411,6 +1504,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.smin.nxv1i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1436,6 +1530,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_and_nxv1i64: @@ -1445,6 +1540,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredand.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.and.nxv1i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1470,6 +1566,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_or_nxv1i64: @@ -1479,6 +1576,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.or.nxv1i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1504,6 +1602,7 @@ ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; 
RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_xor_nxv1i64: @@ -1513,6 +1612,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.xor.nxv1i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1538,6 +1638,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_add_nxv2i64: @@ -1547,6 +1648,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredsum.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.add.nxv2i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1572,6 +1674,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_nxv2i64: @@ -1581,6 +1684,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.umax.nxv2i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1606,6 +1710,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_smax_nxv2i64: @@ -1615,6 +1720,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.smax.nxv2i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1640,6 +1746,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_nxv2i64: @@ -1649,6 +1756,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.umin.nxv2i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1674,6 +1782,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_smin_nxv2i64: @@ -1683,6 +1792,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.smin.nxv2i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1708,6 +1818,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_and_nxv2i64: @@ -1717,6 +1828,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredand.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.and.nxv2i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1742,6 +1854,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_or_nxv2i64: @@ -1751,6 +1864,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: 
.cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.or.nxv2i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1776,6 +1890,7 @@ ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_xor_nxv2i64: @@ -1785,6 +1900,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.xor.nxv2i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1810,6 +1926,7 @@ ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_add_nxv4i64: @@ -1819,6 +1936,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredsum.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.add.nxv4i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1844,6 +1962,7 @@ ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umax_nxv4i64: @@ -1853,6 +1972,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredmaxu.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.umax.nxv4i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1878,6 +1998,7 @@ ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_smax_nxv4i64: @@ -1887,6 +2008,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredmax.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.smax.nxv4i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1912,6 +2034,7 @@ ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_umin_nxv4i64: @@ -1921,6 +2044,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredminu.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.umin.nxv4i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1946,6 +2070,7 @@ ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_smin_nxv4i64: @@ -1955,6 +2080,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredmin.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.smin.nxv4i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -1980,6 +2106,7 @@ ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_and_nxv4i64: @@ -1989,6 +2116,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredand.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.and.nxv4i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -2014,6 +2142,7 @@ ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: 
addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_or_nxv4i64: @@ -2023,6 +2152,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredor.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.or.nxv4i64(i64 %s, %v, %m, i32 %evl) ret i64 %r @@ -2048,6 +2178,7 @@ ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vpreduce_xor_nxv4i64: @@ -2057,6 +2188,7 @@ ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredxor.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %r = call i64 @llvm.vp.reduce.xor.nxv4i64(i64 %s, %v, %m, i32 %evl) ret i64 %r diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.nxv1i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -32,6 +33,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.or.nxv1i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -49,6 +51,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.xor.nxv1i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -66,6 +69,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.nxv2i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -84,6 +88,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.or.nxv2i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -101,6 +106,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.xor.nxv2i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -118,6 +124,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.nxv4i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -136,6 +143,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.or.nxv4i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -153,6 +161,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.xor.nxv4i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -170,6 +179,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.nxv8i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -188,6 +198,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call 
i1 @llvm.vp.reduce.or.nxv8i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -205,6 +216,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.xor.nxv8i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -222,6 +234,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.nxv16i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -240,6 +253,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.or.nxv16i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -257,6 +271,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.xor.nxv16i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -274,6 +289,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.nxv32i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -292,6 +308,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.or.nxv32i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -309,6 +326,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.xor.nxv32i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -326,6 +344,7 @@ ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.and.nxv64i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -344,6 +363,7 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.or.nxv64i1(i1 %s, %v, %m, i32 %evl) ret i1 %r @@ -361,6 +381,7 @@ ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %r = call i1 @llvm.vp.reduce.xor.nxv64i1(i1 %s, %v, %m, i32 %evl) ret i1 %r diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll @@ -11,6 +11,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: snez a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.nxv1i1( %v) ret i1 %red @@ -25,6 +26,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.nxv1i1( %v) ret i1 %red @@ -40,6 +42,7 @@ ; CHECK-NEXT: vpopc.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.nxv1i1( %v) ret i1 %red @@ -54,6 +57,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: snez a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.nxv2i1( %v) ret i1 %red @@ -68,6 +72,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; 
CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.nxv2i1( %v) ret i1 %red @@ -83,6 +88,7 @@ ; CHECK-NEXT: vpopc.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.nxv2i1( %v) ret i1 %red @@ -97,6 +103,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: snez a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.nxv4i1( %v) ret i1 %red @@ -111,6 +118,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.nxv4i1( %v) ret i1 %red @@ -126,6 +134,7 @@ ; CHECK-NEXT: vpopc.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.nxv4i1( %v) ret i1 %red @@ -140,6 +149,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: snez a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.nxv8i1( %v) ret i1 %red @@ -154,6 +164,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.nxv8i1( %v) ret i1 %red @@ -169,6 +180,7 @@ ; CHECK-NEXT: vpopc.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.nxv8i1( %v) ret i1 %red @@ -183,6 +195,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: snez a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.nxv16i1( %v) ret i1 %red @@ -197,6 +210,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.nxv16i1( %v) ret i1 %red @@ -212,6 +226,7 @@ ; CHECK-NEXT: vpopc.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.nxv16i1( %v) ret i1 %red @@ -226,6 +241,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: snez a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.nxv32i1( %v) ret i1 %red @@ -240,6 +256,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.nxv32i1( %v) ret i1 %red @@ -255,6 +272,7 @@ ; CHECK-NEXT: vpopc.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.nxv32i1( %v) ret i1 %red @@ -269,6 +287,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: snez a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.or.nxv64i1( %v) ret i1 %red @@ -283,6 +302,7 @@ ; CHECK-NEXT: vpopc.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.xor.nxv64i1( %v) ret i1 %red @@ -298,6 +318,7 @@ ; CHECK-NEXT: vpopc.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %red = call i1 @llvm.vector.reduce.and.nxv64i1( %v) ret i1 %red diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -36,6 +38,7 @@ ; CHECK-NEXT: vadd.vv v9, v9, v10 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -48,6 +51,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -58,6 +62,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -77,6 +82,7 @@ ; CHECK-NEXT: vadd.vv v9, v9, v10 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -89,6 +95,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -99,6 +106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -118,6 +126,7 @@ ; CHECK-NEXT: vadd.vv v9, v9, v10 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -130,6 +139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -140,6 +150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -159,6 +170,7 @@ ; CHECK-NEXT: vadd.vv v9, v9, v10 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -171,6 +183,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -181,6 +194,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: 
vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -200,6 +214,7 @@ ; CHECK-NEXT: vadd.vv v10, v10, v12 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -212,6 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -222,6 +238,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -241,6 +258,7 @@ ; CHECK-NEXT: vadd.vv v12, v12, v16 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -253,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -263,6 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -282,6 +302,7 @@ ; CHECK-NEXT: vadd.vv v16, v16, v24 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -294,6 +315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -304,6 +326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -323,6 +346,7 @@ ; RV32-NEXT: vadd.vv v9, v9, v10 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv1i16_0: @@ -336,6 +360,7 @@ ; RV64-NEXT: vadd.vv v9, v9, v10 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -348,6 +373,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -358,6 +384,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -377,6 +404,7 @@ ; RV32-NEXT: vadd.vv v9, v9, v10 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: 
vrem_vi_nxv2i16_0: @@ -390,6 +418,7 @@ ; RV64-NEXT: vadd.vv v9, v9, v10 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -402,6 +431,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -412,6 +442,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -431,6 +462,7 @@ ; RV32-NEXT: vadd.vv v9, v9, v10 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv4i16_0: @@ -444,6 +476,7 @@ ; RV64-NEXT: vadd.vv v9, v9, v10 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -456,6 +489,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -466,6 +500,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -485,6 +520,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v12 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv8i16_0: @@ -498,6 +534,7 @@ ; RV64-NEXT: vadd.vv v10, v10, v12 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -510,6 +547,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -520,6 +558,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -539,6 +578,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v16 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv16i16_0: @@ -552,6 +592,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v16 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -564,6 +605,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -574,6 +616,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 
0 %splat = shufflevector %head, undef, zeroinitializer @@ -593,6 +636,7 @@ ; RV32-NEXT: vadd.vv v16, v16, v24 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv32i16_0: @@ -606,6 +650,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v24 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -618,6 +663,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -628,6 +674,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -648,6 +695,7 @@ ; RV32-NEXT: vadd.vv v9, v9, v10 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv1i32_0: @@ -662,6 +710,7 @@ ; RV64-NEXT: vadd.vv v9, v9, v10 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -674,6 +723,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -684,6 +734,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -704,6 +755,7 @@ ; RV32-NEXT: vadd.vv v9, v9, v10 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv2i32_0: @@ -718,6 +770,7 @@ ; RV64-NEXT: vadd.vv v9, v9, v10 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -730,6 +783,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -740,6 +794,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -760,6 +815,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v12 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv4i32_0: @@ -774,6 +830,7 @@ ; RV64-NEXT: vadd.vv v10, v10, v12 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -786,6 +843,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
%vc = srem %va, %vb ret %vc @@ -796,6 +854,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -816,6 +875,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v16 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv8i32_0: @@ -830,6 +890,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v16 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -842,6 +903,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -852,6 +914,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -872,6 +935,7 @@ ; RV32-NEXT: vadd.vv v16, v16, v24 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv16i32_0: @@ -886,6 +950,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v24 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -898,6 +963,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -915,12 +981,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -950,6 +1018,7 @@ ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv1i64_0: @@ -970,6 +1039,7 @@ ; RV64-NEXT: vadd.vv v9, v9, v10 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -982,6 +1052,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -999,12 +1070,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1034,6 +1107,7 @@ ; RV32-NEXT: addi 
a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv2i64_0: @@ -1054,6 +1128,7 @@ ; RV64-NEXT: vadd.vv v10, v10, v12 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1066,6 +1141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -1083,12 +1159,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1118,6 +1196,7 @@ ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv4i64_0: @@ -1138,6 +1217,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v16 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1150,6 +1230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = srem %va, %vb ret %vc @@ -1167,12 +1248,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1202,6 +1285,7 @@ ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vi_nxv8i64_0: @@ -1222,6 +1306,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v24 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll @@ -17,6 +17,7 @@ ; CHECK-NEXT: vsra.vi v9, v9, 1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -31,6 +32,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -41,6 +43,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -53,6 +56,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -65,6 +69,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -81,6 +86,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -91,6 +97,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -103,6 +110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -115,6 +123,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -131,6 +140,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv3i8( %va, %b, %m, i32 %evl) ret %v @@ -143,6 +153,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -153,6 +164,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -165,6 +177,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -177,6 +190,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -193,6 +207,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -203,6 +218,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, 
zeroinitializer @@ -215,6 +231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -227,6 +244,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -243,6 +261,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -253,6 +272,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -265,6 +285,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -277,6 +298,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -293,6 +315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -303,6 +326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -315,6 +339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -327,6 +352,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -343,6 +369,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -353,6 +380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -365,6 +393,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -377,6 +406,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, 
m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -393,6 +423,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -403,6 +434,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -415,6 +447,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -427,6 +460,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -443,6 +477,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -453,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -465,6 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -477,6 +514,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -493,6 +531,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -503,6 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -515,6 +555,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -527,6 +568,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -543,6 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -553,6 +596,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -565,6 +609,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -577,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -593,6 +639,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -603,6 +650,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -615,6 +663,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -627,6 +676,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -643,6 +693,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -653,6 +704,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -665,6 +717,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -677,6 +730,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -693,6 +747,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -703,6 +758,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, 
undef, zeroinitializer @@ -715,6 +771,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -727,6 +784,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -743,6 +801,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -753,6 +812,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -765,6 +825,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -777,6 +838,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -793,6 +855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -803,6 +866,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -815,6 +879,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -827,6 +892,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -843,6 +909,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -853,6 +920,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -865,6 +933,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -877,6 +946,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -893,6 +963,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -903,6 +974,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -915,6 +987,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -927,6 +1000,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -943,6 +1017,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -953,6 +1028,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -973,12 +1049,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -999,12 +1077,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1021,6 +1101,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1031,6 +1112,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1051,12 +1133,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli 
zero, a1, e64, m2, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1077,12 +1161,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1099,6 +1185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1109,6 +1196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1129,12 +1217,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1155,12 +1245,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1177,6 +1269,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.srem.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -1187,6 +1280,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrem.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1207,12 +1301,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1233,12 +1329,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vrem.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head 
= insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -33,6 +35,7 @@ ; CHECK-NEXT: vsrl.vi v9, v9, 5 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -45,6 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -55,6 +59,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -71,6 +76,7 @@ ; CHECK-NEXT: vsrl.vi v9, v9, 5 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -93,6 +100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -109,6 +117,7 @@ ; CHECK-NEXT: vsrl.vi v9, v9, 5 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -121,6 +130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -131,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -147,6 +158,7 @@ ; CHECK-NEXT: vsrl.vi v9, v9, 5 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -169,6 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vremu.vx 
v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -185,6 +199,7 @@ ; CHECK-NEXT: vsrl.vi v10, v10, 5 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -207,6 +223,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -223,6 +240,7 @@ ; CHECK-NEXT: vsrl.vi v12, v12, 5 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -235,6 +253,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -245,6 +264,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -261,6 +281,7 @@ ; CHECK-NEXT: vsrl.vi v16, v16, 5 ; CHECK-NEXT: addi a0, zero, -7 ; CHECK-NEXT: vnmsac.vx v8, a0, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -273,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -283,6 +305,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -300,6 +323,7 @@ ; RV32-NEXT: vsrl.vi v9, v9, 13 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv1i16_0: @@ -311,6 +335,7 @@ ; RV64-NEXT: vsrl.vi v9, v9, 13 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -323,6 +348,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -333,6 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -350,6 +377,7 @@ ; RV32-NEXT: vsrl.vi v9, v9, 13 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv2i16_0: @@ 
-361,6 +389,7 @@ ; RV64-NEXT: vsrl.vi v9, v9, 13 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -373,6 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -383,6 +413,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -400,6 +431,7 @@ ; RV32-NEXT: vsrl.vi v9, v9, 13 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv4i16_0: @@ -411,6 +443,7 @@ ; RV64-NEXT: vsrl.vi v9, v9, 13 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -423,6 +456,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -433,6 +467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -450,6 +485,7 @@ ; RV32-NEXT: vsrl.vi v10, v10, 13 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv8i16_0: @@ -461,6 +497,7 @@ ; RV64-NEXT: vsrl.vi v10, v10, 13 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -473,6 +510,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -483,6 +521,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -500,6 +539,7 @@ ; RV32-NEXT: vsrl.vi v12, v12, 13 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv16i16_0: @@ -511,6 +551,7 @@ ; RV64-NEXT: vsrl.vi v12, v12, 13 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -523,6 +564,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -533,6 +575,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = 
shufflevector %head, undef, zeroinitializer @@ -550,6 +593,7 @@ ; RV32-NEXT: vsrl.vi v16, v16, 13 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv32i16_0: @@ -561,6 +605,7 @@ ; RV64-NEXT: vsrl.vi v16, v16, 13 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i16 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -573,6 +618,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -583,6 +629,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -600,6 +647,7 @@ ; RV32-NEXT: vsrl.vi v9, v9, 29 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv1i32_0: @@ -611,6 +659,7 @@ ; RV64-NEXT: vsrl.vi v9, v9, 29 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -623,6 +672,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -633,6 +683,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -650,6 +701,7 @@ ; RV32-NEXT: vsrl.vi v9, v9, 29 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv2i32_0: @@ -661,6 +713,7 @@ ; RV64-NEXT: vsrl.vi v9, v9, 29 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -673,6 +726,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -683,6 +737,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -700,6 +755,7 @@ ; RV32-NEXT: vsrl.vi v10, v10, 29 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv4i32_0: @@ -711,6 +767,7 @@ ; RV64-NEXT: vsrl.vi v10, v10, 29 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -723,6 +780,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = 
urem %va, %vb ret %vc @@ -733,6 +791,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -750,6 +809,7 @@ ; RV32-NEXT: vsrl.vi v12, v12, 29 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv8i32_0: @@ -761,6 +821,7 @@ ; RV64-NEXT: vsrl.vi v12, v12, 29 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -773,6 +834,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -783,6 +845,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -800,6 +863,7 @@ ; RV32-NEXT: vsrl.vi v16, v16, 29 ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv16i32_0: @@ -811,6 +875,7 @@ ; RV64-NEXT: vsrl.vi v16, v16, 29 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i32 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -823,6 +888,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -840,12 +906,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -871,6 +939,7 @@ ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv1i64_0: @@ -884,6 +953,7 @@ ; RV64-NEXT: vsrl.vx v9, v9, a0 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v9 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -897,6 +967,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -914,6 +985,7 @@ ; CHECK-NEXT: vsll.vv v9, v10, v9 ; CHECK-NEXT: vadd.vi v9, v9, -1 ; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -927,6 +999,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -944,12 +1017,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -975,6 +1050,7 @@ ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv2i64_0: @@ -988,6 +1064,7 @@ ; RV64-NEXT: vsrl.vx v10, v10, a0 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v10 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1001,6 +1078,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1018,6 +1096,7 @@ ; CHECK-NEXT: vsll.vv v10, v12, v10 ; CHECK-NEXT: vadd.vi v10, v10, -1 ; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1031,6 +1110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -1048,12 +1128,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1079,6 +1161,7 @@ ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv4i64_0: @@ -1092,6 +1175,7 @@ ; RV64-NEXT: vsrl.vx v12, v12, a0 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v12 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1105,6 +1189,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1122,6 +1207,7 @@ ; CHECK-NEXT: vsll.vv v12, v16, v12 ; CHECK-NEXT: vadd.vi v12, v12, -1 ; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1135,6 +1221,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = urem %va, %vb ret %vc @@ -1152,12 +1239,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; 
RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1183,6 +1272,7 @@ ; RV32-NEXT: addi a0, zero, -7 ; RV32-NEXT: vnmsac.vx v8, a0, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vi_nxv8i64_0: @@ -1196,6 +1286,7 @@ ; RV64-NEXT: vsrl.vx v16, v16, a0 ; RV64-NEXT: addi a0, zero, -7 ; RV64-NEXT: vnmsac.vx v8, a0, v16 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 -7, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1209,6 +1300,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1226,6 +1318,7 @@ ; CHECK-NEXT: vsll.vv v16, v24, v16 ; CHECK-NEXT: vadd.vi v16, v16, -1 ; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: vand.vx v9, v9, a2 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -30,6 +31,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -40,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -52,6 +55,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -64,6 +68,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -80,6 +85,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -90,6 +96,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -102,6 +109,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -114,6 +122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -130,6 +139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv3i8( %va, %b, %m, i32 %evl) ret %v @@ -142,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -152,6 +163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -164,6 +176,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -176,6 +189,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -192,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -202,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -214,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -226,6 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -242,6 +260,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -252,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -264,6 +284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector 
%elt.head, undef, zeroinitializer @@ -276,6 +297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -292,6 +314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -302,6 +325,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -314,6 +338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -326,6 +351,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -342,6 +368,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -352,6 +379,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -364,6 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -376,6 +405,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -392,6 +422,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -402,6 +433,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -414,6 +446,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -426,6 +459,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -442,6 +476,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -452,6 +487,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -464,6 +500,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -476,6 +513,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -492,6 +530,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -502,6 +541,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -514,6 +554,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -526,6 +567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -542,6 +584,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -552,6 +595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -564,6 +608,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -576,6 +621,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -592,6 +638,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -602,6 +649,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -614,6 +662,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -626,6 +675,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -642,6 +692,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -652,6 +703,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -664,6 +716,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -676,6 +729,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -692,6 +746,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -702,6 +757,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -714,6 +770,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -726,6 +783,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -742,6 +800,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -752,6 +811,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -764,6 +824,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = 
insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -776,6 +837,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -792,6 +854,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -802,6 +865,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -814,6 +878,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -826,6 +891,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -842,6 +908,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -852,6 +919,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -864,6 +932,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -876,6 +945,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -892,6 +962,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -902,6 +973,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -914,6 +986,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -926,6 +999,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector 
%elt.head, undef, zeroinitializer @@ -942,6 +1016,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -952,6 +1027,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -972,12 +1048,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -998,12 +1076,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1020,6 +1100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1030,6 +1111,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1050,12 +1132,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1076,12 +1160,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1098,6 +1184,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1108,6 +1195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1128,12 +1216,14 @@ ; RV32-NEXT: vsetvli 
zero, a2, e64, m4, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1154,12 +1244,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1176,6 +1268,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.urem.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -1186,6 +1279,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vremu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1206,12 +1300,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1232,12 +1328,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vremu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -31,6 +33,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -43,6 +46,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -55,6 +59,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -67,6 +72,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -79,6 +85,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -91,6 +98,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -103,6 +111,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -115,6 +124,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -127,6 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -139,6 +150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -151,6 +163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -163,6 +176,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -175,6 +189,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -187,6 +202,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -199,6 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -211,6 +228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -223,6 +241,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -235,6 +254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -247,6 +267,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -259,6 +280,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -271,6 +293,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -283,6 +306,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -295,6 +319,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -307,6 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -319,6 +345,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -331,6 +358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -343,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -355,6 +384,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -4, 
i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -367,6 +397,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -379,6 +410,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -391,6 +423,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -403,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -415,6 +449,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -427,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -446,12 +482,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsub.vv v8, v9, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -464,6 +502,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -483,12 +522,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v10, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -501,6 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -520,12 +562,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v12, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = 
shufflevector %head, undef, zeroinitializer @@ -538,6 +582,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -557,12 +602,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -575,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, -4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -4, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -23,6 +24,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -37,6 +39,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -49,6 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -65,6 +69,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -77,6 +82,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -91,6 +97,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -103,6 +110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -119,6 +127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, 
v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -131,6 +140,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -145,6 +155,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -157,6 +168,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -173,6 +185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -185,6 +198,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -199,6 +213,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -211,6 +226,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -227,6 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -239,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -253,6 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -265,6 +284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -281,6 +301,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -293,6 +314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, 
ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -307,6 +329,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -319,6 +342,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -335,6 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -347,6 +372,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -361,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -373,6 +400,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -389,6 +417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -401,6 +430,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -415,6 +445,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -427,6 +458,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -443,6 +475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -455,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -469,6 +503,7 @@ ; CHECK: 
# %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -481,6 +516,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -497,6 +533,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -509,6 +546,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -523,6 +561,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -535,6 +574,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -551,6 +591,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -563,6 +604,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -577,6 +619,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -589,6 +632,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -605,6 +649,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -617,6 +662,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -631,6 +677,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector 
%elt.head, undef, zeroinitializer @@ -643,6 +690,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -659,6 +707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -671,6 +720,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -685,6 +735,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -697,6 +748,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -713,6 +765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -725,6 +778,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -739,6 +793,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -751,6 +806,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -767,6 +823,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -779,6 +836,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -793,6 +851,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -805,6 +864,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
%elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -821,6 +881,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -833,6 +894,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -847,6 +909,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -859,6 +922,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -875,6 +939,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -887,6 +952,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -901,6 +967,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -913,6 +980,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -929,6 +997,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -941,6 +1010,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -955,6 +1025,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -967,6 +1038,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -991,12 +1063,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v9, v8, v0.t ; 
RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1017,12 +1091,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v9, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1037,6 +1113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1049,6 +1126,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1073,12 +1151,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v10, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1099,12 +1179,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v10, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1119,6 +1201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1131,6 +1214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1155,12 +1239,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v12, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1181,12 +1267,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v12, v8 ; RV32-NEXT: addi sp, sp, 16 +; 
RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1201,6 +1289,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1213,6 +1302,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1237,12 +1327,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v16, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1263,12 +1355,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vrsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1283,6 +1377,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1295,6 +1390,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrsub.vi v8, v8, 2 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv1i8( %va, %b) ret %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv2i8( %va, %b) ret %v @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -69,6 +74,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv4i8( %va, %b) ret %v @@ -93,6 +100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -105,6 +113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv8i8( %va, %b) ret %v @@ -129,6 +139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -141,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv16i8( %va, %b) ret %v @@ -165,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -177,6 +191,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -191,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv32i8( %va, %b) ret %v @@ -201,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -213,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv64i8( %va, %b) ret %v @@ -237,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -249,6 +269,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -263,6 +284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv1i16( %va, %b) ret %v @@ -273,6 +295,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -285,6 +308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -299,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv2i16( %va, %b) ret %v @@ -309,6 +334,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -321,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -335,6 +362,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv4i16( %va, %b) ret %v @@ -345,6 +373,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -357,6 +386,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -371,6 +401,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv8i16( %va, %b) ret %v @@ -381,6 +412,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; 
CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -393,6 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -407,6 +440,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv16i16( %va, %b) ret %v @@ -417,6 +451,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -429,6 +464,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -443,6 +479,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv32i16( %va, %b) ret %v @@ -453,6 +490,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -465,6 +503,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -479,6 +518,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv1i32( %va, %b) ret %v @@ -489,6 +529,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -501,6 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -515,6 +557,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv2i32( %va, %b) ret %v @@ -525,6 +568,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -537,6 +581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, 
zeroinitializer @@ -551,6 +596,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv4i32( %va, %b) ret %v @@ -561,6 +607,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -573,6 +620,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -587,6 +635,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv8i32( %va, %b) ret %v @@ -597,6 +646,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -609,6 +659,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -623,6 +674,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv16i32( %va, %b) ret %v @@ -633,6 +685,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vsadd.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -645,6 +698,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -659,6 +713,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv1i64( %va, %b) ret %v @@ -676,12 +731,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: sadd_nxv1i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vsadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -694,6 +751,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -708,6 +766,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv2i64( %va, %b) ret %v @@ -725,12 +784,14 @@ ; 
RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: sadd_nxv2i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vsadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -743,6 +804,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -757,6 +819,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv4i64( %va, %b) ret %v @@ -774,12 +837,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: sadd_nxv4i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vsadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -792,6 +857,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -806,6 +872,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsadd.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv8i64( %va, %b) ret %v @@ -823,12 +890,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: sadd_nxv8i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsadd.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -841,6 +910,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsadd.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv1i8( %va, %b) ret %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -33,6 +35,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 8, i32 0 
%vb = shufflevector %elt.head, undef, zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv2i8( %va, %b) ret %v @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -69,6 +74,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -83,6 +89,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv4i8( %va, %b) ret %v @@ -93,6 +100,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -105,6 +113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -119,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv8i8( %va, %b) ret %v @@ -129,6 +139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -141,6 +152,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -155,6 +167,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv16i8( %va, %b) ret %v @@ -165,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -177,6 +191,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -191,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv32i8( %va, %b) ret %v @@ -201,6 +217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = 
insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -213,6 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -227,6 +245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv64i8( %va, %b) ret %v @@ -237,6 +256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -249,6 +269,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -263,6 +284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv1i16( %va, %b) ret %v @@ -273,6 +295,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -285,6 +308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -299,6 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv2i16( %va, %b) ret %v @@ -309,6 +334,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -321,6 +347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -335,6 +362,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv4i16( %va, %b) ret %v @@ -345,6 +373,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -357,6 +386,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -371,6 +401,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, 
ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv8i16( %va, %b) ret %v @@ -381,6 +412,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -393,6 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -407,6 +440,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv16i16( %va, %b) ret %v @@ -417,6 +451,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -429,6 +464,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -443,6 +479,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv32i16( %va, %b) ret %v @@ -453,6 +490,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -465,6 +503,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -479,6 +518,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv1i32( %va, %b) ret %v @@ -489,6 +529,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -501,6 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -515,6 +557,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv2i32( %va, %b) ret %v @@ -525,6 +568,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -537,6 
+581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -551,6 +596,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv4i32( %va, %b) ret %v @@ -561,6 +607,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -573,6 +620,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -587,6 +635,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv8i32( %va, %b) ret %v @@ -597,6 +646,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -609,6 +659,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -623,6 +674,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv16i32( %va, %b) ret %v @@ -633,6 +685,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vsaddu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -645,6 +698,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -659,6 +713,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv1i64( %va, %b) ret %v @@ -676,12 +731,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uadd_nxv1i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vsaddu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -694,6 +751,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 8, i32 0 %vb = shufflevector %elt.head, undef, 
zeroinitializer @@ -708,6 +766,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv2i64( %va, %b) ret %v @@ -725,12 +784,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uadd_nxv2i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vsaddu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -743,6 +804,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -757,6 +819,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv4i64( %va, %b) ret %v @@ -774,12 +837,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uadd_nxv4i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vsaddu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -792,6 +857,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -806,6 +872,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsaddu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv8i64( %va, %b) ret %v @@ -823,12 +890,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uadd_nxv8i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsaddu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -841,6 +910,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsaddu.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 8, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half 
%b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -29,6 +31,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -39,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -51,6 +55,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -61,6 +66,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -73,6 +79,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -83,6 +90,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -95,6 +103,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half zeroinitializer, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -105,6 +114,7 @@ define @vmerge_truelhs_nxv8f16_0( %va, %vb) { ; CHECK-LABEL: vmerge_truelhs_nxv8f16_0: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -116,6 +126,7 @@ ; CHECK-LABEL: vmerge_falselhs_nxv8f16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select zeroinitializer, %va, %vb ret %vc @@ -126,6 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -136,6 +148,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -148,6 +161,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -158,6 +172,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -170,6 +185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -180,6 
+196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -192,6 +209,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -202,6 +220,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -214,6 +233,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -224,6 +244,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -236,6 +257,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -246,6 +268,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -258,6 +281,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float zeroinitializer, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -270,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -280,6 +305,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -292,6 +318,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -302,6 +329,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -314,6 +342,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -324,6 +353,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, 
zeroinitializer @@ -336,6 +366,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -346,6 +377,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -358,6 +390,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -368,6 +401,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -380,6 +414,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double zeroinitializer, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -425,6 +460,7 @@ ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cond = icmp eq %va, zeroinitializer %sel = select %cond, %vb, zeroinitializer @@ -450,6 +486,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: vs8r.v v16, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cond = and %ma, %mb %sel = select %cond, %a, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -29,6 +31,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -39,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -51,6 +55,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -61,6 +66,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -73,6 +79,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, 
ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -83,6 +90,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -95,6 +103,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half zeroinitializer, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -105,6 +114,7 @@ define @vmerge_truelhs_nxv8f16_0( %va, %vb) { ; CHECK-LABEL: vmerge_truelhs_nxv8f16_0: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -116,6 +126,7 @@ ; CHECK-LABEL: vmerge_falselhs_nxv8f16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select zeroinitializer, %va, %vb ret %vc @@ -126,6 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -136,6 +148,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -148,6 +161,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -158,6 +172,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -170,6 +185,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -180,6 +196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -192,6 +209,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -202,6 +220,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -214,6 +233,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -224,6 +244,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -236,6 +257,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -246,6 +268,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -258,6 +281,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float zeroinitializer, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -270,6 +294,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -280,6 +305,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -292,6 +318,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -302,6 +329,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -314,6 +342,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -324,6 +353,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -336,6 +366,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -346,6 +377,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -358,6 +390,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -368,6 +401,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -380,6 +414,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 0, 
v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, double zeroinitializer, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -425,6 +460,7 @@ ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cond = icmp eq %va, zeroinitializer %sel = select %cond, %vb, zeroinitializer @@ -450,6 +486,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: vs8r.v v16, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %cond = and %ma, %mb %sel = select %cond, %a, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll @@ -6,6 +6,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -16,6 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -28,6 +30,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -40,6 +43,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -50,6 +54,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -62,6 +67,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -74,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -84,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -96,6 +104,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -108,6 +117,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -118,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -130,6 +141,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -142,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -152,6 +165,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -164,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -176,6 +191,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -186,6 +202,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -198,6 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -210,6 +228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -220,6 +239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -232,6 +252,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -244,6 +265,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -254,6 +276,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -266,6 +289,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -278,6 +302,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, 
v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -288,6 +313,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -300,6 +326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -312,6 +339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -322,6 +350,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -334,6 +363,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -346,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -356,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -368,6 +400,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -380,6 +413,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -390,6 +424,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -402,6 +437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -414,6 +450,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -424,6 +461,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -436,6 +474,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; 
CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -448,6 +487,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -458,6 +498,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -470,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -482,6 +524,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -492,6 +535,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -504,6 +548,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -516,6 +561,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -526,6 +572,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -538,6 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -550,6 +598,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -560,6 +609,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -572,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -584,6 +635,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -594,6 +646,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, 
zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -606,6 +659,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -618,6 +672,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -635,6 +690,7 @@ ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -647,6 +703,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -659,6 +716,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -676,6 +734,7 @@ ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -688,6 +747,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -700,6 +760,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -717,6 +778,7 @@ ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -729,6 +791,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -741,6 +804,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -758,6 +822,7 @@ ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -770,6 +835,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 3, i32 0 %splat = shufflevector 
%head, undef, zeroinitializer @@ -780,6 +846,7 @@ define @vmerge_truelhs_nxv8i64_0( %va, %vb) { ; CHECK-LABEL: vmerge_truelhs_nxv8i64_0: ; CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -791,6 +858,7 @@ ; CHECK-LABEL: vmerge_falselhs_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select zeroinitializer, %va, %vb ret %vc diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll @@ -6,6 +6,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -16,6 +17,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -28,6 +30,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -40,6 +43,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -50,6 +54,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -62,6 +67,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -74,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -84,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -96,6 +104,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -108,6 +117,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -118,6 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -130,6 +141,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -142,6 +154,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -152,6 +165,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -164,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -176,6 +191,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -186,6 +202,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -198,6 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -210,6 +228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -220,6 +239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -232,6 +252,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -244,6 +265,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -254,6 +276,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -266,6 +289,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -278,6 +302,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -288,6 +313,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -300,6 +326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -312,6 +339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -322,6 +350,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -334,6 +363,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -346,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -356,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -368,6 +400,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -380,6 +413,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -390,6 +424,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -402,6 +437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -414,6 +450,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -424,6 +461,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -436,6 +474,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 3, i32 0 
%splat = shufflevector %head, undef, zeroinitializer @@ -448,6 +487,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -458,6 +498,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -470,6 +511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -482,6 +524,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -492,6 +535,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -504,6 +548,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -516,6 +561,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -526,6 +572,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -538,6 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -550,6 +598,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -560,6 +609,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -572,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -584,6 +635,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -594,6 +646,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -606,6 +659,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -618,6 +672,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -628,6 +683,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -640,6 +696,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -652,6 +709,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -662,6 +720,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -674,6 +733,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -686,6 +746,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -696,6 +757,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -708,6 +770,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -720,6 +783,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb ret %vc @@ -730,6 +794,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -742,6 +807,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 3, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -752,6 +818,7 @@ define @vmerge_truelhs_nxv8i64_0( %va, %vb) { ; CHECK-LABEL: vmerge_truelhs_nxv8i64_0: ; 
CHECK: # %bb.0: +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -763,6 +830,7 @@ ; CHECK-LABEL: vmerge_falselhs_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = select zeroinitializer, %va, %vb ret %vc diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll @@ -9,6 +9,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select %cc, %a, %b ret %v @@ -21,6 +22,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select %cc, %a, %b ret %v @@ -33,6 +35,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select %cc, %a, %b ret %v @@ -45,6 +48,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select %cc, %a, %b ret %v @@ -57,6 +61,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select %cc, %a, %b ret %v @@ -69,6 +74,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select %cc, %a, %b ret %v @@ -81,6 +87,7 @@ ; CHECK-NEXT: vmandnot.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = select %cc, %a, %b ret %v diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll @@ -8,6 +8,7 @@ ; CHECK-LABEL: vsetvl_sext: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli a0, 1, e16, m2, ta, mu +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1) %b = trunc i64 %a to i32 @@ -18,6 +19,7 @@ ; CHECK-LABEL: vsetvl_zext: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli a0, 1, e16, m2, ta, mu +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1) %b = trunc i64 %a to i32 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll @@ -463,6 +463,7 @@ ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: bnez a4, .LBB8_2 ; CHECK-NEXT: .LBB8_3: # %for.end +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 2, i64 3) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll @@ -14,6 +14,7 @@ ; CHECK-NEXT: vs4r.v v12, (a0) ; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, 
v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %index = add %x, %x store %index, * %y diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -31,6 +33,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -43,6 +46,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -55,6 +59,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -67,6 +72,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -79,6 +85,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -91,6 +98,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -103,6 +111,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -115,6 +124,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -127,6 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -139,6 +150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -151,6 +163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, 
zero, e8, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -163,6 +176,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -175,6 +189,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -187,6 +202,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -199,6 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -211,6 +228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -223,6 +241,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -235,6 +254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -247,6 +267,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -259,6 +280,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -271,6 +293,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -283,6 +306,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -295,6 +319,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -307,6 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 6 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -319,6 +345,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -331,6 +358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -343,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -355,6 +384,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -367,6 +397,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -379,6 +410,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -391,6 +423,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -403,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -415,6 +449,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -427,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -439,6 +475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -451,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -464,6 +502,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -476,6 +515,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -488,6 +528,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -500,6 +541,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -513,6 +555,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -525,6 +568,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -537,6 +581,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -549,6 +594,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -562,6 +608,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -574,6 +621,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -586,6 +634,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -598,6 +647,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -611,6 +661,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -623,6 +674,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll @@ -15,6 +15,7 @@ ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -29,6 +30,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -39,6 +41,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -51,6 +54,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -63,6 +67,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -77,6 +82,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -89,6 +95,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -105,6 +112,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -115,6 +123,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -127,6 +136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -139,6 +149,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -153,6 +164,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ 
-165,6 +177,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -181,6 +194,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -191,6 +205,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -203,6 +218,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -215,6 +231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -229,6 +246,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -241,6 +259,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -257,6 +276,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv5i8( %va, %b, %m, i32 %evl) ret %v @@ -269,6 +289,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -279,6 +300,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -291,6 +313,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -303,6 +326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -317,6 +341,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -329,6 +354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: 
vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -345,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -355,6 +382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -367,6 +395,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -379,6 +408,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -393,6 +423,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -405,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -421,6 +453,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -431,6 +464,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -443,6 +477,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -455,6 +490,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -469,6 +505,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -481,6 +518,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -497,6 +535,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -507,6 +546,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -519,6 +559,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -531,6 +572,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -545,6 +587,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -557,6 +600,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -573,6 +617,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -583,6 +628,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -595,6 +641,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -607,6 +654,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -621,6 +669,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -633,6 +682,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -649,6 +699,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -659,6 +710,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector 
%head, undef, zeroinitializer @@ -671,6 +723,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -683,6 +736,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -697,6 +751,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -709,6 +764,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -725,6 +781,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -735,6 +792,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -747,6 +805,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -759,6 +818,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -773,6 +833,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -785,6 +846,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -801,6 +863,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -811,6 +874,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -823,6 +887,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ 
-835,6 +900,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -849,6 +915,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -861,6 +928,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -877,6 +945,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -887,6 +956,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -899,6 +969,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -911,6 +982,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -925,6 +997,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -937,6 +1010,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -953,6 +1027,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -963,6 +1038,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -975,6 +1051,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -987,6 +1064,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1001,6 +1079,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1013,6 +1092,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1029,6 +1109,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -1039,6 +1120,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1051,6 +1133,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1063,6 +1146,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1077,6 +1161,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1089,6 +1174,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1105,6 +1191,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -1115,6 +1202,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1127,6 +1215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1139,6 +1228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1153,6 +1243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1165,6 +1256,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1181,6 +1273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -1191,6 +1284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1203,6 +1297,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1215,6 +1310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1229,6 +1325,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1241,6 +1338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1257,6 +1355,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -1267,6 +1366,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1279,6 +1379,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1291,6 +1392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1305,6 +1407,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1317,6 +1420,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1333,6 +1437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -1343,6 +1448,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1355,6 +1461,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1367,6 +1474,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1381,6 +1489,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1393,6 +1502,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1409,6 +1519,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -1419,6 +1530,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1431,12 +1543,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1449,12 +1563,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1469,6 +1585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1481,6 +1598,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 3, i32 0 %vb = shufflevector 
%elt.head, undef, zeroinitializer @@ -1497,6 +1615,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1507,6 +1626,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1519,12 +1639,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1537,12 +1659,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1557,6 +1681,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1569,6 +1694,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1585,6 +1711,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1595,6 +1722,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1607,12 +1735,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1625,12 +1755,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1645,6 +1777,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, 
a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1657,6 +1790,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1673,6 +1807,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.shl.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -1683,6 +1818,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1695,12 +1831,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1713,12 +1851,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsll.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsll.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1733,6 +1873,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1745,6 +1886,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsll.vi v8, v8, 3 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 3, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, i32 %vl) @@ -27,6 +28,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -44,6 +46,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg2.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, i32 %vl) @@ -58,6 +61,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -74,6 +78,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, i32 %vl) @@ -87,6 +92,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -104,6 +110,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, i32 %vl) @@ -118,6 +125,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -135,6 +143,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, i32 %vl) @@ -149,6 +158,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -166,6 +176,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, i32 %vl) @@ -180,6 +191,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -197,6 +209,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -211,6 +224,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -228,6 +242,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: 
vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -242,6 +257,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -259,6 +275,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -273,6 +290,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -291,6 +309,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -306,6 +325,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -324,6 +344,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -339,6 +360,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -357,6 +379,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -372,6 +395,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -391,6 +415,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -407,6 +432,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -426,6 +452,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -442,6 +469,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -461,6 +489,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -477,6 +506,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -497,6 +527,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -514,6 +545,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -534,6 +566,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -551,6 +584,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -571,6 +605,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -588,6 +623,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -609,6 +645,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -627,6 +664,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -648,6 +686,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -666,6 +705,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -687,6 +727,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -705,6 +746,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -727,6 +769,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -746,6 +789,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -768,6 +812,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -787,6 +832,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -809,6 +855,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -828,6 +875,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; 
CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -844,6 +892,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, i32 %vl) @@ -857,6 +906,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -874,6 +924,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, i32 %vl) @@ -888,6 +939,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -904,6 +956,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, i32 %vl) @@ -917,6 +970,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -934,6 +988,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -948,6 +1003,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -965,6 +1021,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -979,6 +1036,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -996,6 +1054,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg3.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -1010,6 +1069,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -1028,6 +1088,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -1043,6 +1104,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -1061,6 +1123,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -1076,6 +1139,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -1094,6 +1158,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -1109,6 +1174,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -1126,6 +1192,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, i32 %vl) @@ -1140,6 +1207,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1157,6 +1225,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, i32 %vl) @@ -1171,6 +1240,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1188,6 
+1258,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, i32 %vl) @@ -1202,6 +1273,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1219,6 +1291,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1233,6 +1306,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1250,6 +1324,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1264,6 +1339,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1281,6 +1357,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1295,6 +1372,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1313,6 +1391,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1328,6 +1407,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1346,6 +1426,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1361,6 +1442,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, 
v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1379,6 +1461,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1394,6 +1477,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1413,6 +1497,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1429,6 +1514,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1448,6 +1534,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1464,6 +1551,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1483,6 +1571,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1499,6 +1588,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1519,6 +1609,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1536,6 +1627,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1556,6 +1648,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, 
(a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1573,6 +1666,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1593,6 +1687,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1610,6 +1705,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1631,6 +1727,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1649,6 +1746,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1670,6 +1768,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1688,6 +1787,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1709,6 +1809,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1727,6 +1828,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1749,6 +1851,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1768,6 +1871,7 @@ ; 
CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1790,6 +1894,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1809,6 +1914,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1831,6 +1937,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1850,6 +1957,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1867,6 +1975,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, i32 %vl) @@ -1881,6 +1990,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -1898,6 +2008,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, i32 %vl) @@ -1912,6 +2023,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -1928,6 +2040,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, i32 %vl) @@ -1941,6 +2054,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -1958,6 +2072,7 @@ ; 
CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -1972,6 +2087,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -1989,6 +2105,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2003,6 +2120,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2020,6 +2138,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2034,6 +2153,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2052,6 +2172,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2067,6 +2188,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2085,6 +2207,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2100,6 +2223,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2118,6 +2242,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2133,6 +2258,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v 
v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2152,6 +2278,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2168,6 +2295,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2187,6 +2315,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2203,6 +2332,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2222,6 +2352,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2238,6 +2369,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2258,6 +2390,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2275,6 +2408,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2295,6 +2429,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2312,6 +2447,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2332,6 +2468,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, 
mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2349,6 +2486,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2370,6 +2508,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2388,6 +2527,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2409,6 +2549,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2427,6 +2568,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2448,6 +2590,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2466,6 +2609,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2488,6 +2632,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2507,6 +2652,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2529,6 +2675,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, 
%val, %val, i16* %base, %index, i32 %vl) @@ -2548,6 +2695,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2570,6 +2718,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2589,6 +2738,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2606,6 +2756,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, i32 %vl) @@ -2620,6 +2771,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2637,6 +2789,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, i32 %vl) @@ -2651,6 +2804,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2668,6 +2822,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, i32 %vl) @@ -2682,6 +2837,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2699,6 +2855,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2713,6 +2870,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2730,6 
+2888,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2744,6 +2903,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2761,6 +2921,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2775,6 +2936,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2793,6 +2955,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2808,6 +2971,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2826,6 +2990,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2841,6 +3006,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2859,6 +3025,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2874,6 +3041,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2893,6 +3061,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2909,6 +3078,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, 
mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2928,6 +3098,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2944,6 +3115,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2963,6 +3135,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2979,6 +3152,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2999,6 +3173,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3016,6 +3191,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3036,6 +3212,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3053,6 +3230,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3073,6 +3251,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3090,6 +3269,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3111,6 +3291,7 @@ ; 
CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3129,6 +3310,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3150,6 +3332,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3168,6 +3351,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3189,6 +3373,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3207,6 +3392,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3229,6 +3415,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3248,6 +3435,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3270,6 +3458,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3289,6 +3478,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3311,6 +3501,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3330,6 +3521,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3347,6 +3539,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, i32 %vl) @@ -3361,6 +3554,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3378,6 +3572,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, i32 %vl) @@ -3392,6 +3587,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3408,6 +3604,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, i32 %vl) @@ -3421,6 +3618,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3438,6 +3636,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3452,6 +3651,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3469,6 +3669,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3483,6 +3684,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* 
%base, %index, %mask, i32 %vl) @@ -3500,6 +3702,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3514,6 +3717,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3532,6 +3736,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3547,6 +3752,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3565,6 +3771,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3580,6 +3787,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3598,6 +3806,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3613,6 +3822,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3629,6 +3839,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, i32 %vl) @@ -3642,6 +3853,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3659,6 +3871,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, i32 %vl) @@ -3673,6 +3886,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, 
m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3689,6 +3903,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, i32 %vl) @@ -3702,6 +3917,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3719,6 +3935,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3733,6 +3950,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3750,6 +3968,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3764,6 +3983,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3781,6 +4001,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3795,6 +4016,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3813,6 +4035,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3828,6 +4051,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3846,6 +4070,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3861,6 +4086,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3879,6 +4105,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3894,6 +4121,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3913,6 +4141,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3929,6 +4158,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3948,6 +4178,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3964,6 +4195,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3983,6 +4215,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3999,6 +4232,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4019,6 +4253,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4036,6 +4271,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16( 
%val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4056,6 +4292,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4073,6 +4310,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4093,6 +4331,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4110,6 +4349,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4131,6 +4371,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4149,6 +4390,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4170,6 +4412,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4188,6 +4431,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4209,6 +4453,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4227,6 +4472,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4249,6 +4495,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4268,6 +4515,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4290,6 +4538,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4309,6 +4558,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4331,6 +4581,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4350,6 +4601,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4367,6 +4619,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, i32 %vl) @@ -4381,6 +4634,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -4398,6 +4652,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, i32 %vl) @@ -4412,6 +4667,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -4429,6 +4685,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, i32 %vl) @@ -4443,6 +4700,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -4460,6 +4718,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, i32 %vl) @@ -4474,6 +4733,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4491,6 +4751,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, i32 %vl) @@ -4505,6 +4766,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4521,6 +4783,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, i32 %vl) @@ -4534,6 +4797,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4551,6 +4815,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4565,6 +4830,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4582,6 +4848,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4596,6 +4863,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4613,6 +4881,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4627,6 +4896,7 @@ ; CHECK-NEXT: 
vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4645,6 +4915,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4660,6 +4931,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4678,6 +4950,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4693,6 +4966,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4711,6 +4985,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4726,6 +5001,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4745,6 +5021,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4761,6 +5038,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4780,6 +5058,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4796,6 +5075,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4815,6 +5095,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, 
ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4831,6 +5112,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4851,6 +5133,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4868,6 +5151,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4888,6 +5172,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4905,6 +5190,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4925,6 +5211,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4942,6 +5229,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4963,6 +5251,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4981,6 +5270,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5002,6 +5292,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5020,6 +5311,7 @@ ; CHECK-NEXT: vmv1r.v 
v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5041,6 +5333,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5059,6 +5352,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5081,6 +5375,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5100,6 +5395,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5122,6 +5418,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5141,6 +5438,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5163,6 +5461,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5182,6 +5481,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5199,6 +5499,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, i32 %vl) @@ -5213,6 +5514,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5230,6 +5532,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, i32 %vl) @@ -5244,6 +5547,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5261,6 +5565,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, i32 %vl) @@ -5275,6 +5580,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5292,6 +5598,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5306,6 +5613,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5323,6 +5631,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5337,6 +5646,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5354,6 +5664,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5368,6 +5679,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5386,6 +5698,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5401,6 +5714,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5419,6 +5733,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5434,6 +5749,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5452,6 +5768,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5467,6 +5784,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5486,6 +5804,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5502,6 +5821,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5521,6 +5841,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5537,6 +5858,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5556,6 +5878,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5572,6 +5895,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5592,6 +5916,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5609,6 +5934,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5629,6 +5955,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5646,6 +5973,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5666,6 +5994,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5683,6 +6012,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5704,6 +6034,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5722,6 +6053,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5743,6 +6075,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5761,6 +6094,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5782,6 +6116,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, 
%val, %val, %val, i16* %base, %index, i32 %vl) @@ -5800,6 +6135,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5822,6 +6158,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5841,6 +6178,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5863,6 +6201,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5882,6 +6221,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5904,6 +6244,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5923,6 +6264,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5939,6 +6281,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, i32 %vl) @@ -5952,6 +6295,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5969,6 +6313,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, i32 %vl) @@ -5983,6 +6328,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6000,6 +6346,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, i32 %vl) @@ -6014,6 +6361,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6031,6 +6379,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, i32 %vl) @@ -6045,6 +6394,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6062,6 +6412,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, i32 %vl) @@ -6076,6 +6427,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6093,6 +6445,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6107,6 +6460,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6124,6 +6478,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6138,6 +6493,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6155,6 +6511,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6169,6 +6526,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6187,6 +6545,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6202,6 +6561,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6220,6 +6580,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6235,6 +6596,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6253,6 +6615,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6268,6 +6631,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6287,6 +6651,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6303,6 +6668,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6322,6 +6688,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6338,6 +6705,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6357,6 +6725,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, 
(a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6373,6 +6742,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6393,6 +6763,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6410,6 +6781,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6430,6 +6802,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6447,6 +6820,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6467,6 +6841,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6484,6 +6859,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6505,6 +6881,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6523,6 +6900,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6544,6 +6922,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6562,6 +6941,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, 
mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6583,6 +6963,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6601,6 +6982,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6623,6 +7005,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6642,6 +7025,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6664,6 +7048,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6683,6 +7068,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6705,6 +7091,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6724,6 +7111,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6741,6 +7129,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, i32 %vl) @@ -6755,6 +7144,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, 
%index, %mask, i32 %vl) @@ -6772,6 +7162,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, i32 %vl) @@ -6786,6 +7177,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6803,6 +7195,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, i32 %vl) @@ -6817,6 +7210,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6834,6 +7228,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6848,6 +7243,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6865,6 +7261,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6879,6 +7276,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6896,6 +7294,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6910,6 +7309,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6928,6 +7328,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6943,6 +7344,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: 
vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6961,6 +7363,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6976,6 +7379,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6994,6 +7398,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7009,6 +7414,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7028,6 +7434,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7044,6 +7451,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7063,6 +7471,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7079,6 +7488,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7098,6 +7508,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7114,6 +7525,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7134,6 +7546,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: 
vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7151,6 +7564,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7171,6 +7585,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7188,6 +7603,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7208,6 +7624,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7225,6 +7642,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7246,6 +7664,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7264,6 +7683,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7285,6 +7705,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7303,6 +7724,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7324,6 +7746,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7342,6 
+7765,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7364,6 +7788,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7383,6 +7808,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7405,6 +7831,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7424,6 +7851,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7446,6 +7874,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7465,6 +7894,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7482,6 +7912,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, i32 %vl) @@ -7496,6 +7927,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7513,6 +7945,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, i32 %vl) @@ -7527,6 +7960,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7544,6 +7978,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, i32 %vl) @@ -7558,6 +7993,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7575,6 +8011,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7589,6 +8026,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7606,6 +8044,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7620,6 +8059,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7637,6 +8077,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7651,6 +8092,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7669,6 +8111,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7684,6 +8127,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7702,6 +8146,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7717,6 +8162,7 @@ ; 
CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7735,6 +8181,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7750,6 +8197,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7767,6 +8215,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16( %val, %val, half* %base, %index, i32 %vl) @@ -7781,6 +8230,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -7798,6 +8248,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8( %val, %val, half* %base, %index, i32 %vl) @@ -7812,6 +8263,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -7828,6 +8280,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32( %val, %val, half* %base, %index, i32 %vl) @@ -7841,6 +8294,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -7858,6 +8312,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16( %val, %val, double* %base, %index, i32 %vl) @@ -7872,6 +8327,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -7889,6 +8345,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v 
v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8( %val, %val, double* %base, %index, i32 %vl) @@ -7903,6 +8360,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -7920,6 +8378,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32( %val, %val, double* %base, %index, i32 %vl) @@ -7934,6 +8393,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -7951,6 +8411,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8( %val, %val, double* %base, %index, i32 %vl) @@ -7965,6 +8426,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -7982,6 +8444,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32( %val, %val, double* %base, %index, i32 %vl) @@ -7996,6 +8459,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8013,6 +8477,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16( %val, %val, double* %base, %index, i32 %vl) @@ -8027,6 +8492,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8044,6 +8510,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, i32 %vl) @@ -8058,6 +8525,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, 
double* %base, %index, %mask, i32 %vl) @@ -8075,6 +8543,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, i32 %vl) @@ -8089,6 +8558,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8106,6 +8576,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, i32 %vl) @@ -8120,6 +8591,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8138,6 +8610,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8153,6 +8626,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8171,6 +8645,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8186,6 +8661,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8204,6 +8680,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8219,6 +8696,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8238,6 +8716,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8254,6 
+8733,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8273,6 +8753,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8289,6 +8770,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8308,6 +8790,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8324,6 +8807,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8344,6 +8828,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8361,6 +8846,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8381,6 +8867,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8398,6 +8885,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8418,6 +8906,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8435,6 +8924,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16( 
%val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8456,6 +8946,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8474,6 +8965,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8495,6 +8987,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8513,6 +9006,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8534,6 +9028,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8552,6 +9047,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8574,6 +9070,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8593,6 +9090,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8615,6 +9113,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8634,6 +9133,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8656,6 +9156,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8675,6 +9176,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8692,6 +9194,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32( %val, %val, float* %base, %index, i32 %vl) @@ -8706,6 +9209,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8723,6 +9227,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8( %val, %val, float* %base, %index, i32 %vl) @@ -8737,6 +9242,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8754,6 +9260,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16( %val, %val, float* %base, %index, i32 %vl) @@ -8768,6 +9275,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8785,6 +9293,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, i32 %vl) @@ -8799,6 +9308,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8816,6 +9326,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, i32 %vl) @@ -8830,6 +9341,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8847,6 +9359,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, i32 %vl) @@ -8861,6 +9374,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8879,6 +9393,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -8894,6 +9409,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8912,6 +9428,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -8927,6 +9444,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8945,6 +9463,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -8960,6 +9479,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8979,6 +9499,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -8995,6 +9516,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9014,6 +9536,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9030,6 +9553,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9049,6 +9573,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9065,6 +9590,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9085,6 +9611,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9102,6 +9629,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9122,6 +9650,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9139,6 +9668,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9159,6 +9689,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9176,6 +9707,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9197,6 +9729,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9215,6 +9748,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: 
vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9236,6 +9770,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9254,6 +9789,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9275,6 +9811,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9293,6 +9830,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9315,6 +9853,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9334,6 +9873,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9356,6 +9896,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9375,6 +9916,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9397,6 +9939,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9416,6 +9959,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9433,6 +9977,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8( %val, %val, half* %base, %index, i32 %vl) @@ -9447,6 +9992,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9464,6 +10010,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32( %val, %val, half* %base, %index, i32 %vl) @@ -9478,6 +10025,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9495,6 +10043,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16( %val, %val, half* %base, %index, i32 %vl) @@ -9509,6 +10058,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9526,6 +10076,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, i32 %vl) @@ -9540,6 +10091,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9557,6 +10109,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, i32 %vl) @@ -9571,6 +10124,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9588,6 +10142,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, i32 %vl) @@ 
-9602,6 +10157,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9620,6 +10176,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9635,6 +10192,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9653,6 +10211,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9668,6 +10227,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9686,6 +10246,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9701,6 +10262,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9720,6 +10282,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9736,6 +10299,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9755,6 +10319,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9771,6 +10336,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ 
-9790,6 +10356,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9806,6 +10373,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9826,6 +10394,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9843,6 +10412,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9863,6 +10433,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9880,6 +10451,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9900,6 +10472,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9917,6 +10490,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9938,6 +10512,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9956,6 +10531,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9977,6 +10553,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9995,6 +10572,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10016,6 +10594,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10034,6 +10613,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10056,6 +10636,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10075,6 +10656,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10097,6 +10679,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10116,6 +10699,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10138,6 +10722,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10157,6 +10742,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10174,6 +10760,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8( %val, %val, float* %base, %index, i32 %vl) @@ -10188,6 +10775,7 @@ ; CHECK-NEXT: 
vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10205,6 +10793,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32( %val, %val, float* %base, %index, i32 %vl) @@ -10219,6 +10808,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10236,6 +10826,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16( %val, %val, float* %base, %index, i32 %vl) @@ -10250,6 +10841,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10267,6 +10859,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, i32 %vl) @@ -10281,6 +10874,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10298,6 +10892,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, i32 %vl) @@ -10312,6 +10907,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10329,6 +10925,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, i32 %vl) @@ -10343,6 +10940,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10361,6 +10959,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: 
vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10376,6 +10975,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10394,6 +10994,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10409,6 +11010,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10427,6 +11029,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10442,6 +11045,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10461,6 +11065,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10477,6 +11082,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10496,6 +11102,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10512,6 +11119,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10531,6 +11139,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10547,6 +11156,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; 
CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10567,6 +11177,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10584,6 +11195,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10604,6 +11216,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10621,6 +11234,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10641,6 +11255,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10658,6 +11273,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10679,6 +11295,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10697,6 +11314,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10718,6 +11336,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10736,6 +11355,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, 
%val, %val, float* %base, %index, %mask, i32 %vl) @@ -10757,6 +11377,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10775,6 +11396,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10797,6 +11419,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10816,6 +11439,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10838,6 +11462,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10857,6 +11482,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10879,6 +11505,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10898,6 +11525,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10915,6 +11543,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16( %val, %val, half* %base, %index, i32 %vl) @@ -10929,6 +11558,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10946,6 +11576,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: 
vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8( %val, %val, half* %base, %index, i32 %vl) @@ -10960,6 +11591,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10976,6 +11608,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32( %val, %val, half* %base, %index, i32 %vl) @@ -10989,6 +11622,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11006,6 +11640,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11020,6 +11655,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11037,6 +11673,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11051,6 +11688,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11068,6 +11706,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11082,6 +11721,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11100,6 +11740,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11115,6 +11756,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11133,6 +11775,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11148,6 +11791,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11166,6 +11810,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11181,6 +11826,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11198,6 +11844,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16( %val, %val, float* %base, %index, i32 %vl) @@ -11212,6 +11859,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -11229,6 +11877,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8( %val, %val, float* %base, %index, i32 %vl) @@ -11243,6 +11892,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -11260,6 +11910,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32( %val, %val, float* %base, %index, i32 %vl) @@ -11274,6 +11925,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -11291,6 +11943,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32( %val, %val, double* %base, 
%index, i32 %vl) @@ -11305,6 +11958,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11322,6 +11976,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8( %val, %val, double* %base, %index, i32 %vl) @@ -11336,6 +11991,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11353,6 +12009,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16( %val, %val, double* %base, %index, i32 %vl) @@ -11367,6 +12024,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11384,6 +12042,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, i32 %vl) @@ -11398,6 +12057,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11415,6 +12075,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, i32 %vl) @@ -11429,6 +12090,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11446,6 +12108,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, i32 %vl) @@ -11460,6 +12123,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11478,6 +12142,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; 
CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -11493,6 +12158,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11511,6 +12177,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -11526,6 +12193,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11544,6 +12212,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -11559,6 +12228,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11576,6 +12246,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16( %val, %val, half* %base, %index, i32 %vl) @@ -11590,6 +12261,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11607,6 +12279,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8( %val, %val, half* %base, %index, i32 %vl) @@ -11621,6 +12294,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11637,6 +12311,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32( %val, %val, half* %base, %index, i32 %vl) @@ -11650,6 +12325,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v 
v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11667,6 +12343,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11681,6 +12358,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11698,6 +12376,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11712,6 +12391,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11729,6 +12409,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11743,6 +12424,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11761,6 +12443,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11776,6 +12459,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11794,6 +12478,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11809,6 +12494,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11827,6 +12513,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11842,6 +12529,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11861,6 +12549,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11877,6 +12566,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11896,6 +12586,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11912,6 +12603,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11931,6 +12623,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11947,6 +12640,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11967,6 +12661,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11984,6 +12679,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12004,6 +12700,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12021,6 +12718,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12041,6 +12739,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12058,6 +12757,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12079,6 +12779,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12097,6 +12798,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12118,6 +12820,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12136,6 +12839,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12157,6 +12861,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12175,6 +12880,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12197,6 +12903,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12216,6 +12923,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, 
%index, %mask, i32 %vl) @@ -12238,6 +12946,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12257,6 +12966,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12279,6 +12989,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12298,6 +13009,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12315,6 +13027,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32( %val, %val, half* %base, %index, i32 %vl) @@ -12329,6 +13042,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12346,6 +13060,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8( %val, %val, half* %base, %index, i32 %vl) @@ -12360,6 +13075,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12377,6 +13093,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16( %val, %val, half* %base, %index, i32 %vl) @@ -12391,6 +13108,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12408,6 +13126,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32( %val, %val, %val, half* %base, 
%index, i32 %vl) @@ -12422,6 +13141,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12439,6 +13159,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, i32 %vl) @@ -12453,6 +13174,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12470,6 +13192,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, i32 %vl) @@ -12484,6 +13207,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12502,6 +13226,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12517,6 +13242,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12535,6 +13261,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12550,6 +13277,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12568,6 +13296,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12583,6 +13312,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12602,6 +13332,7 @@ 
; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12618,6 +13349,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12637,6 +13369,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12653,6 +13386,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12672,6 +13406,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12688,6 +13423,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12708,6 +13444,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12725,6 +13462,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12745,6 +13483,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12762,6 +13501,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12782,6 +13522,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, 
%val, %val, half* %base, %index, i32 %vl) @@ -12799,6 +13540,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12820,6 +13562,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12838,6 +13581,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12859,6 +13603,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12877,6 +13622,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12898,6 +13644,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12916,6 +13663,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12938,6 +13686,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12957,6 +13706,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12979,6 +13729,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12998,6 +13749,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; 
CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -13020,6 +13772,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -13039,6 +13792,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -13056,6 +13810,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16( %val, %val, float* %base, %index, i32 %vl) @@ -13070,6 +13825,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13087,6 +13843,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8( %val, %val, float* %base, %index, i32 %vl) @@ -13101,6 +13858,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13118,6 +13876,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32( %val, %val, float* %base, %index, i32 %vl) @@ -13132,6 +13891,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13149,6 +13909,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13163,6 +13924,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13180,6 +13942,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; 
CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13194,6 +13957,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13211,6 +13975,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13225,6 +13990,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13243,6 +14009,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -13258,6 +14025,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13276,6 +14044,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -13291,6 +14060,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13309,6 +14079,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -13324,6 +14095,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16( 
%val, %val, i16* %base, %index, i64 %vl) @@ -27,6 +28,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -44,6 +46,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, i64 %vl) @@ -58,6 +61,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -74,6 +78,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, i64 %vl) @@ -87,6 +92,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -104,6 +110,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, i64 %vl) @@ -118,6 +125,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -135,6 +143,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, i64 %vl) @@ -149,6 +158,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -165,6 +175,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, i64 %vl) @@ -178,6 +189,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -195,6 +207,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, i64 %vl) @@ -209,6 +222,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -226,6 +240,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -240,6 +255,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -257,6 +273,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -271,6 +288,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -288,6 +306,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -302,6 +321,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -319,6 +339,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -333,6 +354,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -351,6 +373,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -366,6 +389,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* 
%base, %index, %mask, i64 %vl) @@ -384,6 +408,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -399,6 +424,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -417,6 +443,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -432,6 +459,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -450,6 +478,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -465,6 +494,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -481,6 +511,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, i64 %vl) @@ -494,6 +525,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -511,6 +543,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, i64 %vl) @@ -525,6 +558,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -541,6 +575,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, i64 %vl) @@ -554,6 +589,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: 
vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -571,6 +607,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -585,6 +622,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -602,6 +640,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -616,6 +655,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -633,6 +673,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -647,6 +688,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -665,6 +707,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -680,6 +723,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -698,6 +742,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -713,6 +758,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -731,6 +777,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg4.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -746,6 +793,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -763,6 +811,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, i64 %vl) @@ -777,6 +826,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -794,6 +844,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, i64 %vl) @@ -808,6 +859,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -825,6 +877,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, i64 %vl) @@ -839,6 +892,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -856,6 +910,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, i64 %vl) @@ -870,6 +925,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -887,6 +943,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -901,6 +958,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -918,6 +976,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, 
e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -932,6 +991,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -949,6 +1009,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -963,6 +1024,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -980,6 +1042,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -994,6 +1057,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1012,6 +1076,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1027,6 +1092,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1045,6 +1111,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1060,6 +1127,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1078,6 +1146,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1093,6 +1162,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1111,6 +1181,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1126,6 +1197,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1145,6 +1217,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1161,6 +1234,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1180,6 +1254,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1196,6 +1271,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1215,6 +1291,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1231,6 +1308,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1250,6 +1328,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1266,6 +1345,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1286,6 +1366,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1303,6 +1384,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1323,6 +1405,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1340,6 +1423,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1360,6 +1444,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1377,6 +1462,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1397,6 +1483,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1414,6 +1501,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1435,6 +1523,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1453,6 +1542,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1474,6 +1564,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1492,6 +1583,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; 
CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1513,6 +1605,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1531,6 +1624,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1552,6 +1646,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1570,6 +1665,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1592,6 +1688,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1611,6 +1708,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1633,6 +1731,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1652,6 +1751,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1674,6 +1774,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1693,6 +1794,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16( 
%val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1715,6 +1817,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1734,6 +1837,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1751,6 +1855,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, i64 %vl) @@ -1765,6 +1870,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1782,6 +1888,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, i64 %vl) @@ -1796,6 +1903,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1813,6 +1921,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, i64 %vl) @@ -1827,6 +1936,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1844,6 +1954,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, i64 %vl) @@ -1858,6 +1969,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1875,6 +1987,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1889,6 +2002,7 @@ ; 
CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1906,6 +2020,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1920,6 +2035,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1937,6 +2053,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1951,6 +2068,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1968,6 +2086,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1982,6 +2101,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2000,6 +2120,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2015,6 +2136,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2033,6 +2155,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2048,6 +2171,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2066,6 +2190,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; 
CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2081,6 +2206,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2099,6 +2225,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2114,6 +2241,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2133,6 +2261,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2149,6 +2278,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2168,6 +2298,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2184,6 +2315,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2203,6 +2335,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2219,6 +2352,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2238,6 +2372,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2254,6 +2389,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: 
vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2274,6 +2410,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2291,6 +2428,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2311,6 +2449,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2328,6 +2467,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2348,6 +2488,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2365,6 +2506,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2385,6 +2527,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2402,6 +2545,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2423,6 +2567,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2441,6 +2586,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2462,6 +2608,7 
@@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2480,6 +2627,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2501,6 +2649,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2519,6 +2668,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2540,6 +2690,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2558,6 +2709,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2580,6 +2732,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2599,6 +2752,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2621,6 +2775,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2640,6 +2795,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2662,6 +2818,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2681,6 +2838,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2703,6 +2861,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2722,6 +2881,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2739,6 +2899,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, i64 %vl) @@ -2753,6 +2914,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2770,6 +2932,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, i64 %vl) @@ -2784,6 +2947,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2800,6 +2964,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, i64 %vl) @@ -2813,6 +2978,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2829,6 +2995,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, i64 %vl) @@ -2842,6 +3009,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2859,6 +3027,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -2873,6 +3042,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2890,6 +3060,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -2904,6 +3075,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2921,6 +3093,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -2935,6 +3108,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2952,6 +3126,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -2966,6 +3141,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2984,6 +3160,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -2999,6 +3176,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -3017,6 +3195,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -3032,6 
+3211,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -3050,6 +3230,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -3065,6 +3246,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -3083,6 +3265,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -3098,6 +3281,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -3114,6 +3298,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, i64 %vl) @@ -3127,6 +3312,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3144,6 +3330,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, i64 %vl) @@ -3158,6 +3345,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3174,6 +3362,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, i64 %vl) @@ -3187,6 +3376,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3204,6 +3394,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, 
(a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, i64 %vl) @@ -3218,6 +3409,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3235,6 +3427,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3249,6 +3442,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3266,6 +3460,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3280,6 +3475,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3297,6 +3493,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3311,6 +3508,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3328,6 +3526,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3342,6 +3541,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3360,6 +3560,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3375,6 +3576,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, 
%val, i8* %base, %index, %mask, i64 %vl) @@ -3393,6 +3595,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3408,6 +3611,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3426,6 +3630,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3441,6 +3646,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3459,6 +3665,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3474,6 +3681,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3493,6 +3701,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3509,6 +3718,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3528,6 +3738,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3544,6 +3755,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3563,6 +3775,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3579,6 
+3792,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3598,6 +3812,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3614,6 +3829,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3634,6 +3850,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3651,6 +3868,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3671,6 +3889,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3688,6 +3907,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3708,6 +3928,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3725,6 +3946,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3745,6 +3967,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3762,6 +3985,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, 
i8* %base, %index, %mask, i64 %vl) @@ -3783,6 +4007,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3801,6 +4026,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3822,6 +4048,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3840,6 +4067,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3861,6 +4089,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3879,6 +4108,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3900,6 +4130,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3918,6 +4149,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3940,6 +4172,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3959,6 +4192,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3981,6 +4215,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -4000,6 +4235,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -4022,6 +4258,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -4041,6 +4278,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -4063,6 +4301,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -4082,6 +4321,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -4099,6 +4339,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, i64 %vl) @@ -4113,6 +4354,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4130,6 +4372,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, i64 %vl) @@ -4144,6 +4387,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4161,6 +4405,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, i64 %vl) @@ -4175,6 +4420,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), 
v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4192,6 +4438,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, i64 %vl) @@ -4206,6 +4453,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4223,6 +4471,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4237,6 +4486,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4254,6 +4504,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4268,6 +4519,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4285,6 +4537,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4299,6 +4552,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4316,6 +4570,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4330,6 +4585,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4348,6 +4604,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64( %val, 
%val, %val, %val, i16* %base, %index, i64 %vl) @@ -4363,6 +4620,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4381,6 +4639,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4396,6 +4655,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4414,6 +4674,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4429,6 +4690,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4447,6 +4709,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4462,6 +4725,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4481,6 +4745,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4497,6 +4762,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4516,6 +4782,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4532,6 +4799,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* 
%base, %index, %mask, i64 %vl) @@ -4551,6 +4819,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4567,6 +4836,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4586,6 +4856,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4602,6 +4873,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4622,6 +4894,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4639,6 +4912,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4659,6 +4933,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4676,6 +4951,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4696,6 +4972,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4713,6 +4990,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4733,6 +5011,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4750,6 +5029,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4771,6 +5051,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4789,6 +5070,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4810,6 +5092,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4828,6 +5111,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4849,6 +5133,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4867,6 +5152,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4888,6 +5174,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4906,6 +5193,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4928,6 +5216,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4947,6 +5236,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, 
mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4969,6 +5259,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4988,6 +5279,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -5010,6 +5302,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -5029,6 +5322,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -5051,6 +5345,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -5070,6 +5365,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -5087,6 +5383,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, i64 %vl) @@ -5101,6 +5398,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5118,6 +5416,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, i64 %vl) @@ -5132,6 +5431,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5149,6 
+5449,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, i64 %vl) @@ -5163,6 +5464,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5179,6 +5481,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, i64 %vl) @@ -5192,6 +5495,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5209,6 +5513,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5223,6 +5528,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5240,6 +5546,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5254,6 +5561,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5271,6 +5579,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5285,6 +5594,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5302,6 +5612,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5316,6 +5627,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5334,6 +5646,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5349,6 +5662,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5367,6 +5681,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5382,6 +5697,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5400,6 +5716,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5415,6 +5732,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5433,6 +5751,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5448,6 +5767,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5467,6 +5787,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5483,6 +5804,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5502,6 +5824,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: 
tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5518,6 +5841,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5537,6 +5861,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5553,6 +5878,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5572,6 +5898,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5588,6 +5915,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5608,6 +5936,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5625,6 +5954,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5645,6 +5975,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5662,6 +5993,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5682,6 +6014,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5699,6 +6032,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5719,6 +6053,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5736,6 +6071,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5757,6 +6093,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5775,6 +6112,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5796,6 +6134,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5814,6 +6153,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5835,6 +6175,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5853,6 +6194,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5874,6 +6216,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5892,6 +6235,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5914,6 +6258,7 @@ ; 
CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5933,6 +6278,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5955,6 +6301,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5974,6 +6321,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5996,6 +6344,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -6015,6 +6364,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -6037,6 +6387,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -6056,6 +6407,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -6072,6 +6424,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, i64 %vl) @@ -6085,6 +6438,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6102,6 +6456,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg2.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, i64 %vl) @@ -6116,6 +6471,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6132,6 +6488,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, i64 %vl) @@ -6145,6 +6502,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6161,6 +6519,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, i64 %vl) @@ -6174,6 +6533,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6191,6 +6551,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6205,6 +6566,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6222,6 +6584,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6236,6 +6599,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6253,6 +6617,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6267,6 +6632,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6284,6 +6650,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, 
mu ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6298,6 +6665,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6316,6 +6684,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6331,6 +6700,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6349,6 +6719,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6364,6 +6735,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6382,6 +6754,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6397,6 +6770,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6415,6 +6789,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6430,6 +6805,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6449,6 +6825,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6465,6 +6842,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6484,6 +6862,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6500,6 +6879,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6519,6 +6899,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6535,6 +6916,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6554,6 +6936,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6570,6 +6953,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6590,6 +6974,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6607,6 +6992,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6627,6 +7013,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6644,6 +7031,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6664,6 +7052,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6681,6 +7070,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6701,6 +7091,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6718,6 +7109,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6739,6 +7131,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6757,6 +7150,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6778,6 +7172,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6796,6 +7191,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6817,6 +7213,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6835,6 +7232,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6856,6 +7254,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6874,6 +7273,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli 
zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6896,6 +7296,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6915,6 +7316,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6937,6 +7339,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6956,6 +7359,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6978,6 +7382,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6997,6 +7402,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -7019,6 +7425,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -7038,6 +7445,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -7055,6 +7463,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, i64 %vl) @@ -7069,6 +7478,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32( 
%val, %val, i64* %base, %index, %mask, i64 %vl) @@ -7086,6 +7496,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, i64 %vl) @@ -7100,6 +7511,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -7117,6 +7529,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, i64 %vl) @@ -7131,6 +7544,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -7148,6 +7562,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, i64 %vl) @@ -7162,6 +7577,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -7178,6 +7594,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, i64 %vl) @@ -7191,6 +7608,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7208,6 +7626,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, i64 %vl) @@ -7222,6 +7641,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7238,6 +7658,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, i64 %vl) @@ -7251,6 +7672,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), 
v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7268,6 +7690,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, i64 %vl) @@ -7282,6 +7705,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7299,6 +7723,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7313,6 +7738,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7330,6 +7756,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7344,6 +7771,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7361,6 +7789,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7375,6 +7804,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7392,6 +7822,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7406,6 +7837,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7424,6 +7856,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32( %val, 
%val, %val, %val, i16* %base, %index, i64 %vl) @@ -7439,6 +7872,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7457,6 +7891,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7472,6 +7907,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7490,6 +7926,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7505,6 +7942,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7523,6 +7961,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7538,6 +7977,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7557,6 +7997,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7573,6 +8014,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7592,6 +8034,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7608,6 +8051,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, 
%index, %mask, i64 %vl) @@ -7627,6 +8071,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7643,6 +8088,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7662,6 +8108,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7678,6 +8125,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7698,6 +8146,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7715,6 +8164,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7735,6 +8185,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7752,6 +8203,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7772,6 +8224,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7789,6 +8242,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7809,6 +8263,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7826,6 +8281,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7847,6 +8303,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7865,6 +8322,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7886,6 +8344,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7904,6 +8363,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7925,6 +8385,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7943,6 +8404,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7964,6 +8426,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7982,6 +8445,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8004,6 +8468,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8023,6 +8488,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8045,6 +8511,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8064,6 +8531,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8086,6 +8554,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8105,6 +8574,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8127,6 +8597,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8146,6 +8617,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8163,6 +8635,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64( %val, %val, i8* %base, %index, i64 %vl) @@ -8177,6 +8650,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8194,6 +8668,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, i64 %vl) @@ -8208,6 +8683,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8225,6 +8701,7 
@@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, i64 %vl) @@ -8239,6 +8716,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8256,6 +8734,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, i64 %vl) @@ -8270,6 +8749,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8287,6 +8767,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8301,6 +8782,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8318,6 +8800,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8332,6 +8815,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8349,6 +8833,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8363,6 +8848,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8380,6 +8866,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8394,6 +8881,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8412,6 +8900,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8427,6 +8916,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8445,6 +8935,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8460,6 +8951,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8478,6 +8970,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8493,6 +8986,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8511,6 +9005,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8526,6 +9021,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8545,6 +9041,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8561,6 +9058,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8580,6 +9078,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32( %val, %val, 
%val, %val, %val, i8* %base, %index, i64 %vl) @@ -8596,6 +9095,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8615,6 +9115,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8631,6 +9132,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8650,6 +9152,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8666,6 +9169,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8686,6 +9190,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8703,6 +9208,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8723,6 +9229,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8740,6 +9247,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8760,6 +9268,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8777,6 +9286,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8797,6 +9307,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8814,6 +9325,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8835,6 +9347,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8853,6 +9366,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8874,6 +9388,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8892,6 +9407,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8913,6 +9429,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8931,6 +9448,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8952,6 +9470,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8970,6 +9489,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8992,6 +9512,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: 
vsoxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9011,6 +9532,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9033,6 +9555,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9052,6 +9575,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9074,6 +9598,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9093,6 +9618,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9115,6 +9641,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9134,6 +9661,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9151,6 +9679,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, i64 %vl) @@ -9165,6 +9694,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9182,6 +9712,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, i64 %vl) @@ -9196,6 +9727,7 @@ ; CHECK-NEXT: vmv1r.v v9, 
v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9213,6 +9745,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, i64 %vl) @@ -9227,6 +9760,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9243,6 +9777,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, i64 %vl) @@ -9256,6 +9791,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9273,6 +9809,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9287,6 +9824,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9304,6 +9842,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9318,6 +9857,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9335,6 +9875,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9349,6 +9890,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9366,6 +9908,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9380,6 +9923,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9398,6 +9942,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9413,6 +9958,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9431,6 +9977,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9446,6 +9993,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9464,6 +10012,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9479,6 +10028,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9497,6 +10047,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9512,6 +10063,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9531,6 +10083,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9547,6 +10100,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* 
%base, %index, %mask, i64 %vl) @@ -9566,6 +10120,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9582,6 +10137,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9601,6 +10157,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9617,6 +10174,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9636,6 +10194,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9652,6 +10211,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9672,6 +10232,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9689,6 +10250,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9709,6 +10271,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9726,6 +10289,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9746,6 +10310,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16( %val, %val, %val, 
%val, %val, %val, i8* %base, %index, i64 %vl) @@ -9763,6 +10328,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9783,6 +10349,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9800,6 +10367,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9821,6 +10389,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9839,6 +10408,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9860,6 +10430,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9878,6 +10449,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9899,6 +10471,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9917,6 +10490,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9938,6 +10512,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9956,6 +10531,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9978,6 +10554,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9997,6 +10574,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10019,6 +10597,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -10038,6 +10617,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10060,6 +10640,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -10079,6 +10660,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10101,6 +10683,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -10120,6 +10703,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10137,6 +10721,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, i64 %vl) @@ -10151,6 +10736,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -10168,6 +10754,7 
@@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, i64 %vl) @@ -10182,6 +10769,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -10198,6 +10786,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, i64 %vl) @@ -10211,6 +10800,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -10228,6 +10818,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, i64 %vl) @@ -10242,6 +10833,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -10258,6 +10850,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, i64 %vl) @@ -10271,6 +10864,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10288,6 +10882,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, i64 %vl) @@ -10302,6 +10897,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10319,6 +10915,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, i64 %vl) @@ -10333,6 +10930,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10350,6 +10948,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, i64 %vl) @@ -10364,6 +10963,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10381,6 +10981,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, i64 %vl) @@ -10395,6 +10996,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10411,6 +11013,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64( %val, %val, i16* %base, %index, i64 %vl) @@ -10424,6 +11027,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10441,6 +11045,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10455,6 +11060,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10472,6 +11078,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10486,6 +11093,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10503,6 +11111,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, i64 %vl) @@ 
-10517,6 +11126,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10534,6 +11144,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10548,6 +11159,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10566,6 +11178,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10581,6 +11194,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10599,6 +11213,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10614,6 +11229,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10632,6 +11248,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10647,6 +11264,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10665,6 +11283,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10680,6 +11299,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10699,6 +11319,7 @@ ; 
CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10715,6 +11336,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10734,6 +11356,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10750,6 +11373,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10769,6 +11393,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10785,6 +11410,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10804,6 +11430,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10820,6 +11447,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10840,6 +11468,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10857,6 +11486,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10877,6 +11507,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* 
%base, %index, i64 %vl) @@ -10894,6 +11525,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10914,6 +11546,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10931,6 +11564,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10951,6 +11585,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10968,6 +11603,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10989,6 +11625,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11007,6 +11644,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11028,6 +11666,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11046,6 +11685,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11067,6 +11707,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11085,6 +11726,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11106,6 +11748,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11124,6 +11767,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11146,6 +11790,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11165,6 +11810,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11187,6 +11833,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11206,6 +11853,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11228,6 +11876,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11247,6 +11896,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11269,6 +11919,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11288,6 +11939,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, 
%val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11305,6 +11957,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, i64 %vl) @@ -11319,6 +11972,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11336,6 +11990,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8( %val, %val, i64* %base, %index, i64 %vl) @@ -11350,6 +12005,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11367,6 +12023,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16( %val, %val, i64* %base, %index, i64 %vl) @@ -11381,6 +12038,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11398,6 +12056,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64( %val, %val, i64* %base, %index, i64 %vl) @@ -11412,6 +12071,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11429,6 +12089,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11443,6 +12104,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11460,6 +12122,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11474,6 +12137,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli 
zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11491,6 +12155,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11505,6 +12170,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11522,6 +12188,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11536,6 +12203,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11554,6 +12222,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11569,6 +12238,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11587,6 +12257,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11602,6 +12273,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11620,6 +12292,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11635,6 +12308,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11653,6 +12327,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: 
vsoxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11668,6 +12343,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11685,6 +12361,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16( %val, %val, half* %base, %index, i64 %vl) @@ -11699,6 +12376,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -11716,6 +12394,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8( %val, %val, half* %base, %index, i64 %vl) @@ -11730,6 +12409,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -11746,6 +12426,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32( %val, %val, half* %base, %index, i64 %vl) @@ -11759,6 +12440,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -11776,6 +12458,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32( %val, %val, double* %base, %index, i64 %vl) @@ -11790,6 +12473,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11807,6 +12491,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8( %val, %val, double* %base, %index, i64 %vl) @@ -11821,6 +12506,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11838,6 +12524,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64( %val, %val, double* %base, %index, i64 %vl) @@ -11852,6 +12539,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11869,6 +12557,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16( %val, %val, double* %base, %index, i64 %vl) @@ -11883,6 +12572,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11900,6 +12590,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64( %val, %val, double* %base, %index, i64 %vl) @@ -11914,6 +12605,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11931,6 +12623,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32( %val, %val, double* %base, %index, i64 %vl) @@ -11945,6 +12638,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11962,6 +12656,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16( %val, %val, double* %base, %index, i64 %vl) @@ -11976,6 +12671,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11993,6 +12689,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8( %val, %val, double* %base, %index, i64 %vl) @@ -12007,6 +12704,7 @@ ; 
CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12024,6 +12722,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12038,6 +12737,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12055,6 +12755,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12069,6 +12770,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12086,6 +12788,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12100,6 +12803,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12117,6 +12821,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12131,6 +12836,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12149,6 +12855,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12164,6 +12871,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12182,6 +12890,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, 
a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12197,6 +12906,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12215,6 +12925,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12230,6 +12941,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12248,6 +12960,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12263,6 +12976,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12282,6 +12996,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12298,6 +13013,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12317,6 +13033,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12333,6 +13050,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12352,6 +13070,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12368,6 +13087,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12387,6 +13107,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12403,6 +13124,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12423,6 +13145,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12440,6 +13163,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12460,6 +13184,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12477,6 +13202,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12497,6 +13223,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12514,6 +13241,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12534,6 +13262,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12551,6 +13280,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8( %val, %val, 
%val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12572,6 +13302,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12590,6 +13321,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12611,6 +13343,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12629,6 +13362,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12650,6 +13384,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12668,6 +13403,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12689,6 +13425,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12707,6 +13444,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12729,6 +13467,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12748,6 +13487,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12770,6 +13510,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: 
vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12789,6 +13530,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12811,6 +13553,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12830,6 +13573,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12852,6 +13596,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12871,6 +13616,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12888,6 +13634,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32( %val, %val, float* %base, %index, i64 %vl) @@ -12902,6 +13649,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -12919,6 +13667,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8( %val, %val, float* %base, %index, i64 %vl) @@ -12933,6 +13682,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -12950,6 +13700,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16( %val, %val, float* %base, %index, i64 %vl) 
@@ -12964,6 +13715,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -12980,6 +13732,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64( %val, %val, float* %base, %index, i64 %vl) @@ -12993,6 +13746,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13010,6 +13764,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, i64 %vl) @@ -13024,6 +13779,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13041,6 +13797,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, i64 %vl) @@ -13055,6 +13812,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13072,6 +13830,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, i64 %vl) @@ -13086,6 +13845,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13103,6 +13863,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, i64 %vl) @@ -13117,6 +13878,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13135,6 +13897,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, 
e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13150,6 +13913,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13168,6 +13932,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13183,6 +13948,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13201,6 +13967,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13216,6 +13983,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13234,6 +14002,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13249,6 +14018,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13268,6 +14038,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13284,6 +14055,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13303,6 +14075,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13319,6 +14092,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, 
m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13338,6 +14112,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13354,6 +14129,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13373,6 +14149,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13389,6 +14166,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13409,6 +14187,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13426,6 +14205,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13446,6 +14226,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13463,6 +14244,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13483,6 +14265,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13500,6 +14283,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 
%vl) @@ -13520,6 +14304,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13537,6 +14322,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13558,6 +14344,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13576,6 +14363,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13597,6 +14385,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13615,6 +14404,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13636,6 +14426,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13654,6 +14445,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13675,6 +14467,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13693,6 +14486,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13715,6 +14509,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13734,6 +14529,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13756,6 +14552,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13775,6 +14572,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13797,6 +14595,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13816,6 +14615,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13838,6 +14638,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13857,6 +14658,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13874,6 +14676,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64( %val, %val, half* %base, %index, i64 %vl) @@ -13888,6 +14691,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -13905,6 +14709,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32( %val, %val, half* %base, %index, i64 %vl) @@ -13919,6 +14724,7 
@@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -13936,6 +14742,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16( %val, %val, half* %base, %index, i64 %vl) @@ -13950,6 +14757,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -13967,6 +14775,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8( %val, %val, half* %base, %index, i64 %vl) @@ -13981,6 +14790,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -13998,6 +14808,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14012,6 +14823,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14029,6 +14841,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14043,6 +14856,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14060,6 +14874,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14074,6 +14889,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14091,6 +14907,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: 
vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14105,6 +14922,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14123,6 +14941,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14138,6 +14957,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14156,6 +14976,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14171,6 +14992,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14189,6 +15011,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14204,6 +15027,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14222,6 +15046,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14237,6 +15062,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14256,6 +15082,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14272,6 +15099,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14291,6 +15119,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14307,6 +15136,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14326,6 +15156,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14342,6 +15173,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14361,6 +15193,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14377,6 +15210,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14397,6 +15231,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14414,6 +15249,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14434,6 +15270,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14451,6 +15288,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14471,6 +15309,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14488,6 +15327,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14508,6 +15348,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14525,6 +15366,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14546,6 +15388,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14564,6 +15407,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14585,6 +15429,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14603,6 +15448,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14624,6 +15470,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14642,6 +15489,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14663,6 +15511,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14681,6 +15530,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14703,6 +15553,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14722,6 +15573,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14744,6 +15596,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14763,6 +15616,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14785,6 +15639,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14804,6 +15659,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14826,6 +15682,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14845,6 +15702,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14862,6 +15720,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64( %val, %val, float* %base, %index, i64 %vl) @@ -14876,6 +15735,7 @@ ; 
CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -14893,6 +15753,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32( %val, %val, float* %base, %index, i64 %vl) @@ -14907,6 +15768,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -14924,6 +15786,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16( %val, %val, float* %base, %index, i64 %vl) @@ -14938,6 +15801,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -14955,6 +15819,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8( %val, %val, float* %base, %index, i64 %vl) @@ -14969,6 +15834,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -14986,6 +15852,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, i64 %vl) @@ -15000,6 +15867,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15017,6 +15885,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, i64 %vl) @@ -15031,6 +15900,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15048,6 +15918,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: 
vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, i64 %vl) @@ -15062,6 +15933,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15079,6 +15951,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, i64 %vl) @@ -15093,6 +15966,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15111,6 +15985,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15126,6 +16001,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15144,6 +16020,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15159,6 +16036,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15177,6 +16055,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15192,6 +16071,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15210,6 +16090,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15225,6 +16106,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15244,6 +16126,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15260,6 +16143,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15279,6 +16163,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15295,6 +16180,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15314,6 +16200,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15330,6 +16217,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15349,6 +16237,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15365,6 +16254,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15385,6 +16275,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15402,6 +16293,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15422,6 +16314,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli 
zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15439,6 +16332,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15459,6 +16353,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15476,6 +16371,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15496,6 +16392,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15513,6 +16410,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15534,6 +16432,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15552,6 +16451,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15573,6 +16473,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15591,6 +16492,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15612,6 +16514,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16( 
%val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15630,6 +16533,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15651,6 +16555,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15669,6 +16574,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15691,6 +16597,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15710,6 +16617,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15732,6 +16640,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15751,6 +16660,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15773,6 +16683,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15792,6 +16703,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15814,6 +16726,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15833,6 +16746,7 @@ ; 
CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15850,6 +16764,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16( %val, %val, half* %base, %index, i64 %vl) @@ -15864,6 +16779,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -15881,6 +16797,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8( %val, %val, half* %base, %index, i64 %vl) @@ -15895,6 +16812,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -15911,6 +16829,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64( %val, %val, half* %base, %index, i64 %vl) @@ -15924,6 +16843,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -15940,6 +16860,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32( %val, %val, half* %base, %index, i64 %vl) @@ -15953,6 +16874,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -15970,6 +16892,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -15984,6 +16907,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16001,6 +16925,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; 
CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16015,6 +16940,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16032,6 +16958,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16046,6 +16973,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16063,6 +16991,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16077,6 +17006,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16095,6 +17025,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16110,6 +17041,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16128,6 +17060,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16143,6 +17076,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16161,6 +17095,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16176,6 +17111,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16194,6 +17130,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16209,6 +17146,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16226,6 +17164,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16( %val, %val, float* %base, %index, i64 %vl) @@ -16240,6 +17179,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -16257,6 +17197,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8( %val, %val, float* %base, %index, i64 %vl) @@ -16271,6 +17212,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -16287,6 +17229,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64( %val, %val, float* %base, %index, i64 %vl) @@ -16300,6 +17243,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -16317,6 +17261,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32( %val, %val, float* %base, %index, i64 %vl) @@ -16331,6 +17276,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -16348,6 +17294,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg2.nxv2f64.nxv2i32( %val, %val, double* %base, %index, i64 %vl) @@ -16362,6 +17309,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16379,6 +17327,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8( %val, %val, double* %base, %index, i64 %vl) @@ -16393,6 +17342,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16410,6 +17360,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16( %val, %val, double* %base, %index, i64 %vl) @@ -16424,6 +17375,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16441,6 +17393,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64( %val, %val, double* %base, %index, i64 %vl) @@ -16455,6 +17408,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16472,6 +17426,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16486,6 +17441,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16503,6 +17459,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16517,6 +17474,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ 
-16534,6 +17492,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16548,6 +17507,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16565,6 +17525,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16579,6 +17540,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16597,6 +17559,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16612,6 +17575,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16630,6 +17594,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16645,6 +17610,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16663,6 +17629,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16678,6 +17645,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16696,6 +17664,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16711,6 +17680,7 @@ ; 
CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16727,6 +17697,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32( %val, %val, half* %base, %index, i64 %vl) @@ -16740,6 +17711,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16757,6 +17729,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8( %val, %val, half* %base, %index, i64 %vl) @@ -16771,6 +17744,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16787,6 +17761,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64( %val, %val, half* %base, %index, i64 %vl) @@ -16800,6 +17775,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16817,6 +17793,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16( %val, %val, half* %base, %index, i64 %vl) @@ -16831,6 +17808,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16848,6 +17826,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16862,6 +17841,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16879,6 +17859,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), 
v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16893,6 +17874,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16910,6 +17892,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16924,6 +17907,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16941,6 +17925,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16955,6 +17940,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16973,6 +17959,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16988,6 +17975,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17006,6 +17994,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17021,6 +18010,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17039,6 +18029,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17054,6 +18045,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17072,6 +18064,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17087,6 +18080,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17106,6 +18100,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17122,6 +18117,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17141,6 +18137,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17157,6 +18154,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17176,6 +18174,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17192,6 +18191,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17211,6 +18211,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17227,6 +18228,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17247,6 +18249,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17264,6 +18267,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17284,6 +18288,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17301,6 +18306,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17321,6 +18327,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17338,6 +18345,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17358,6 +18366,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17375,6 +18384,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17396,6 +18406,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17414,6 +18425,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17435,6 +18447,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17453,6 +18466,7 @@ ; CHECK-NEXT: 
vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17474,6 +18488,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17492,6 +18507,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17513,6 +18529,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17531,6 +18548,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17553,6 +18571,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17572,6 +18591,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17594,6 +18614,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17613,6 +18634,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17635,6 +18657,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17654,6 +18677,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17676,6 +18700,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17695,6 +18720,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17712,6 +18738,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32( %val, %val, half* %base, %index, i64 %vl) @@ -17726,6 +18753,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17743,6 +18771,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8( %val, %val, half* %base, %index, i64 %vl) @@ -17757,6 +18786,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17774,6 +18804,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16( %val, %val, half* %base, %index, i64 %vl) @@ -17788,6 +18819,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17804,6 +18836,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64( %val, %val, half* %base, %index, i64 %vl) @@ -17817,6 +18850,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17834,6 +18868,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -17848,6 +18883,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17865,6 +18901,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, i64 %vl) @@ -17879,6 +18916,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17896,6 +18934,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -17910,6 +18949,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17927,6 +18967,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, i64 %vl) @@ -17941,6 +18982,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17959,6 +19001,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17974,6 +19017,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17992,6 +19036,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18007,6 +19052,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18025,6 +19071,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18040,6 +19087,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18058,6 +19106,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18073,6 +19122,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18092,6 +19142,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18108,6 +19159,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18127,6 +19179,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18143,6 +19196,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18162,6 +19216,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18178,6 +19233,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18197,6 +19253,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: tail call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18213,6 +19270,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18233,6 +19291,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18250,6 +19309,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18270,6 +19330,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18287,6 +19348,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18307,6 +19369,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18324,6 +19387,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18344,6 +19408,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18361,6 +19426,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18382,6 +19448,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18400,6 +19467,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, 
mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18421,6 +19489,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18439,6 +19508,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18460,6 +19530,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18478,6 +19549,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18499,6 +19571,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18517,6 +19590,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18539,6 +19613,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18558,6 +19633,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18580,6 +19656,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18599,6 +19676,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18621,6 +19699,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18640,6 +19719,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18662,6 +19742,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18681,6 +19762,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsoxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18698,6 +19780,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32( %val, %val, float* %base, %index, i64 %vl) @@ -18712,6 +19795,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18729,6 +19813,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8( %val, %val, float* %base, %index, i64 %vl) @@ -18743,6 +19828,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18759,6 +19845,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64( %val, %val, float* %base, %index, i64 %vl) @@ -18772,6 +19859,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18789,6 +19877,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16( %val, %val, float* %base, %index, i64 %vl) @@ -18803,6 +19892,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18820,6 +19910,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, i64 %vl) @@ -18834,6 +19925,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18851,6 +19943,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, i64 %vl) @@ -18865,6 +19958,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18882,6 +19976,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, i64 %vl) @@ -18896,6 +19991,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18913,6 +20009,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, i64 %vl) @@ -18927,6 +20024,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18945,6 +20043,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -18960,6 +20059,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18978,6 +20078,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -18993,6 +20094,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -19011,6 +20113,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -19026,6 +20129,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -19044,6 +20148,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -19059,6 +20164,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsoxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll @@ -9,12 +9,14 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32V-NEXT: vfmv.v.f v8, fa0 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8f16: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV64V-NEXT: vfmv.v.f v8, fa0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, half %f, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -26,12 +28,14 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32V-NEXT: vmv.v.i v8, 0 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_zero_nxv8f16: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV64V-NEXT: vmv.v.i v8, 0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, half zeroinitializer, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -43,12 +47,14 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32V-NEXT: vfmv.v.f v8, fa0 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8f32: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64V-NEXT: vfmv.v.f v8, fa0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, float %f, i32 0 
%splat = shufflevector %head, undef, zeroinitializer @@ -60,12 +66,14 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32V-NEXT: vmv.v.i v8, 0 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_zero_nxv8f32: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64V-NEXT: vmv.v.i v8, 0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, float zeroinitializer, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -77,12 +85,14 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32V-NEXT: vfmv.v.f v8, fa0 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8f64: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64V-NEXT: vfmv.v.f v8, fa0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, double %f, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -94,12 +104,14 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32V-NEXT: vmv.v.i v8, 0 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_zero_nxv8f64: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64V-NEXT: vmv.v.i v8, 0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, double zeroinitializer, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -18,6 +19,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -31,6 +33,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 %x, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -45,6 +48,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %c = icmp ne i32 %x, %y %head = insertelement undef, i1 %c, i32 0 @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -68,6 +73,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -81,6 +87,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 %x, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -92,6 +99,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, 
ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -103,6 +111,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -116,6 +125,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 %x, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -127,6 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -138,6 +149,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -151,6 +163,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 %x, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -162,6 +175,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -173,6 +187,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -186,6 +201,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 %x, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll @@ -9,12 +9,14 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32V-NEXT: vmv.v.i v8, -1 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8i64_1: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64V-NEXT: vmv.v.i v8, -1 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -26,12 +28,14 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32V-NEXT: vmv.v.i v8, 4 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8i64_2: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64V-NEXT: vmv.v.i v8, 4 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 4, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -44,6 +48,7 @@ ; RV32V-NEXT: addi a0, zero, 255 ; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vmv.v.x v8, a0 +; 
RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8i64_3: @@ -51,6 +56,7 @@ ; RV64V-NEXT: addi a0, zero, 255 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vmv.v.x v8, a0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 255, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -70,6 +76,7 @@ ; RV32V-NEXT: addi a0, sp, 8 ; RV32V-NEXT: vlse64.v v8, (a0), zero ; RV32V-NEXT: addi sp, sp, 16 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8i64_4: @@ -79,6 +86,7 @@ ; RV64V-NEXT: addi a0, a0, -1281 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vmv.v.x v8, a0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 4211079935, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -96,12 +104,14 @@ ; RV32V-NEXT: addi a0, sp, 8 ; RV32V-NEXT: vlse64.v v8, (a0), zero ; RV32V-NEXT: addi sp, sp, 16 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8i64_5: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vmv.v.x v8, a0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 %a, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -113,12 +123,14 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32V-NEXT: vadd.vi v8, v8, 2 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vadd_vx_nxv8i64_6: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64V-NEXT: vadd.vi v8, v8, 2 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 2, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -131,12 +143,14 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32V-NEXT: vadd.vi v8, v8, -1 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vadd_vx_nxv8i64_7: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64V-NEXT: vadd.vi v8, v8, -1 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -150,6 +164,7 @@ ; RV32V-NEXT: addi a0, zero, 255 ; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vadd.vx v8, v8, a0 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vadd_vx_nxv8i64_8: @@ -157,6 +172,7 @@ ; RV64V-NEXT: addi a0, zero, 255 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vadd.vx v8, v8, a0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 255, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -171,6 +187,7 @@ ; RV32V-NEXT: addi a0, a0, -1281 ; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vadd.vx v8, v8, a0 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vadd_vx_nxv8i64_9: @@ -179,6 +196,7 @@ ; RV64V-NEXT: addiw a0, a0, -1281 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vadd.vx v8, v8, a0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 2063596287, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -200,6 +218,7 @@ ; RV32V-NEXT: vlse64.v v16, (a0), zero ; RV32V-NEXT: vadd.vv v8, v8, v16 ; RV32V-NEXT: addi sp, sp, 16 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vadd_vx_nxv8i64_10: @@ -209,6 +228,7 @@ ; RV64V-NEXT: addi a0, a0, -1281 ; RV64V-NEXT: vsetvli 
a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vadd.vx v8, v8, a0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 4211079935, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -231,6 +251,7 @@ ; RV32V-NEXT: vlse64.v v16, (a0), zero ; RV32V-NEXT: vadd.vv v8, v8, v16 ; RV32V-NEXT: addi sp, sp, 16 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vadd_vx_nxv8i64_11: @@ -240,6 +261,7 @@ ; RV64V-NEXT: addi a0, a0, -1281 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vadd.vx v8, v8, a0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 8506047231, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -259,12 +281,14 @@ ; RV32V-NEXT: vlse64.v v16, (a0), zero ; RV32V-NEXT: vadd.vv v8, v8, v16 ; RV32V-NEXT: addi sp, sp, 16 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vadd_vx_nxv8i64_12: ; RV64V: # %bb.0: ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vadd.vx v8, v8, a0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %head = insertelement undef, i64 %a, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -277,6 +301,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32V-NEXT: vmv.v.x v8, a0 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8i64_13: @@ -284,6 +309,7 @@ ; RV64V-NEXT: sext.w a0, a0 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vmv.v.x v8, a0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %b = sext i32 %a to i64 %head = insertelement undef, i64 %b, i32 0 @@ -302,6 +328,7 @@ ; RV32V-NEXT: addi a0, sp, 8 ; RV32V-NEXT: vlse64.v v8, (a0), zero ; RV32V-NEXT: addi sp, sp, 16 +; RV32V-NEXT: .cfi_def_cfa_offset 0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8i64_14: @@ -310,6 +337,7 @@ ; RV64V-NEXT: srli a0, a0, 32 ; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64V-NEXT: vmv.v.x v8, a0 +; RV64V-NEXT: .cfi_def_cfa_offset 0 ; RV64V-NEXT: ret %b = zext i32 %a to i64 %head = insertelement undef, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -29,6 +31,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -41,6 +44,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -51,6 +55,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -63,6 +68,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -75,6 +81,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -85,6 +92,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -97,6 +105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -109,6 +118,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -119,6 +129,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -131,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -143,6 +155,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -153,6 +166,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -165,6 +179,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -177,6 +192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -187,6 +203,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -199,6 +216,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -211,6 +229,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -221,6 +240,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement 
undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -233,6 +253,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -245,6 +266,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -255,6 +277,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -267,6 +290,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -279,6 +303,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -289,6 +314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -301,6 +327,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -313,6 +340,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -323,6 +351,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -335,6 +364,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -347,6 +377,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -357,6 +388,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -369,6 +401,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -381,6 +414,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -391,6 +425,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -403,6 +438,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -415,6 +451,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -425,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -437,6 +475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -449,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -459,6 +499,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -471,6 +512,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -483,6 +525,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -493,6 +536,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -505,6 +549,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -517,6 +562,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -527,6 +573,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -539,6 +586,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -551,6 +599,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu 
; CHECK-NEXT: vsra.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -561,6 +610,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -573,6 +623,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -585,6 +636,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -595,6 +647,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -607,6 +660,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -619,6 +673,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -629,6 +684,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -641,6 +697,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -654,6 +711,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -666,6 +724,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -676,6 +735,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -688,6 +748,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -701,6 +762,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -713,6 +775,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: 
vsra.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -723,6 +786,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -735,6 +799,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -748,6 +813,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -760,6 +826,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = ashr %va, %vb ret %vc @@ -770,6 +837,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -782,6 +850,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -795,6 +864,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll @@ -17,6 +17,7 @@ ; CHECK-NEXT: vand.vx v9, v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -31,6 +32,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -41,6 +43,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -53,6 +56,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -65,6 +69,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -79,6 +84,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, 
e8, mf8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -91,6 +97,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -107,6 +114,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -117,6 +125,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -129,6 +138,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -141,6 +151,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -155,6 +166,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -167,6 +179,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -183,6 +196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -193,6 +207,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -205,6 +220,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -217,6 +233,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -231,6 +248,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -243,6 +261,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -259,6 +278,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -269,6 +289,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -281,6 +302,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -293,6 +315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -307,6 +330,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -319,6 +343,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -335,6 +360,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -345,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -357,6 +384,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -369,6 +397,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -383,6 +412,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -395,6 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -411,6 +442,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = 
call @llvm.vp.ashr.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -421,6 +453,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -433,6 +466,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -445,6 +479,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -459,6 +494,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -471,6 +507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -487,6 +524,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -497,6 +535,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -509,6 +548,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -521,6 +561,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -535,6 +576,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -547,6 +589,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -563,6 +606,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -573,6 +617,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ 
-585,6 +630,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -597,6 +643,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -611,6 +658,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -623,6 +671,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -639,6 +688,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -649,6 +699,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -661,6 +712,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -673,6 +725,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -687,6 +740,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -699,6 +753,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -715,6 +770,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -725,6 +781,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -737,6 +794,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -749,6 +807,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -763,6 +822,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -775,6 +835,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -791,6 +852,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -801,6 +863,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -813,6 +876,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -825,6 +889,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -839,6 +904,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -851,6 +917,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -867,6 +934,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -877,6 +945,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -889,6 +958,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -901,6 +971,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -915,6 +986,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, 
e16, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -927,6 +999,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -943,6 +1016,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -953,6 +1027,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -965,6 +1040,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -977,6 +1053,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -991,6 +1068,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1003,6 +1081,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1019,6 +1098,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -1029,6 +1109,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1041,6 +1122,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1053,6 +1135,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1067,6 +1150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1079,6 +1163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, 
ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1095,6 +1180,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -1105,6 +1191,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1117,6 +1204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1129,6 +1217,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1143,6 +1232,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1155,6 +1245,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1171,6 +1262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -1181,6 +1273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1193,6 +1286,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1205,6 +1299,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1219,6 +1314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1231,6 +1327,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1247,6 +1344,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; 
CHECK-NEXT: vsra.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -1257,6 +1355,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1269,6 +1368,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1281,6 +1381,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1295,6 +1396,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1307,6 +1409,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1323,6 +1426,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -1333,6 +1437,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1345,6 +1450,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1357,6 +1463,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1371,6 +1478,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1383,6 +1491,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1399,6 +1508,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -1409,6 +1519,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v9 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1421,12 +1532,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1439,12 +1552,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1459,6 +1574,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1471,6 +1587,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1487,6 +1604,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1497,6 +1615,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1509,12 +1628,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1527,12 +1648,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1547,6 +1670,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1559,6 +1683,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = 
insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1575,6 +1700,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1585,6 +1711,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1597,12 +1724,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1615,12 +1744,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1635,6 +1766,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1647,6 +1779,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1663,6 +1796,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv6i64( %va, %b, %m, i32 %evl) ret %v @@ -1675,6 +1809,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.ashr.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -1685,6 +1820,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsra.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1697,12 +1833,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1715,12 +1853,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsra.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv8i64_unmasked: ; RV64: # %bb.0: 
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsra.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1735,6 +1875,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1747,6 +1888,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsra.vi v8, v8, 5 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 5, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -19,6 +20,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -31,6 +33,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -43,6 +46,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -55,6 +59,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -67,6 +72,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -79,6 +85,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -91,6 +98,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -103,6 +111,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -115,6 +124,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
%head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -127,6 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -139,6 +150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -151,6 +163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -163,6 +176,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -175,6 +189,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -187,6 +202,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -199,6 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -211,6 +228,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -223,6 +241,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -235,6 +254,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -247,6 +267,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -259,6 +280,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -271,6 +293,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = 
shufflevector %head, undef, zeroinitializer @@ -283,6 +306,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -295,6 +319,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -307,6 +332,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 6 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 6, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -319,6 +345,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -331,6 +358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -343,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -355,6 +384,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -367,6 +397,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -379,6 +410,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -391,6 +423,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -403,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -415,6 +449,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -427,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ 
-439,6 +475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -451,6 +488,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -464,6 +502,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -476,6 +515,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -488,6 +528,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -501,6 +542,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -513,6 +555,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -525,6 +568,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -538,6 +582,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -550,6 +595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -562,6 +608,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 31 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 31, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -575,6 +622,7 @@ ; CHECK-NEXT: addi a0, zero, 32 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 32, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: vand.vx v9, v9, a2 ; CHECK-NEXT: vsetvli zero, 
a1, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -30,6 +31,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -40,6 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -52,6 +55,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -64,6 +68,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -78,6 +83,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -90,6 +96,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -106,6 +113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -116,6 +124,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -128,6 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -140,6 +150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -154,6 +165,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -166,6 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -182,6 +195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -192,6 +206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -204,6 +219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -216,6 +232,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -230,6 +247,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -242,6 +260,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -258,6 +277,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -268,6 +288,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -280,6 +301,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -292,6 +314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -306,6 +329,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -318,6 +342,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -334,6 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -344,6 +370,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 
%m = shufflevector %head, undef, zeroinitializer @@ -356,6 +383,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -368,6 +396,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -382,6 +411,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -394,6 +424,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -410,6 +441,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -420,6 +452,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -432,6 +465,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -444,6 +478,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -458,6 +493,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -470,6 +506,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -486,6 +523,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -496,6 +534,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -508,6 +547,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ 
-520,6 +560,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -534,6 +575,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -546,6 +588,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -562,6 +605,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -572,6 +616,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -584,6 +629,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -596,6 +642,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -610,6 +657,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -622,6 +670,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -638,6 +687,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -648,6 +698,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -660,6 +711,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -672,6 +724,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -686,6 +739,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -698,6 +752,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -714,6 +769,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -724,6 +780,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -736,6 +793,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -748,6 +806,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -762,6 +821,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -774,6 +834,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -790,6 +851,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -800,6 +862,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -812,6 +875,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -824,6 +888,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -838,6 +903,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -850,6 +916,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, 
m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -866,6 +933,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -876,6 +944,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -888,6 +957,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -900,6 +970,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -914,6 +985,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -926,6 +998,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -942,6 +1015,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -952,6 +1026,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -964,6 +1039,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -976,6 +1052,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -990,6 +1067,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1002,6 +1080,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1018,6 +1097,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: 
vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -1028,6 +1108,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1040,6 +1121,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1052,6 +1134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1066,6 +1149,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1078,6 +1162,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1094,6 +1179,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -1104,6 +1190,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1116,6 +1203,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1128,6 +1216,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1142,6 +1231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1154,6 +1244,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1170,6 +1261,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -1180,6 +1272,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1192,6 +1285,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1204,6 +1298,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1218,6 +1313,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1230,6 +1326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1246,6 +1343,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -1256,6 +1354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1268,6 +1367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1280,6 +1380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1294,6 +1395,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1306,6 +1408,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1322,6 +1425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -1332,6 +1436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1344,6 +1449,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1356,6 +1462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1370,6 +1477,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1382,6 +1490,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1398,6 +1507,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -1408,6 +1518,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1420,12 +1531,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1438,12 +1551,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1458,6 +1573,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1470,6 +1586,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1486,6 +1603,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1496,6 +1614,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1508,12 +1627,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli 
zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1526,12 +1647,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1546,6 +1669,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1558,6 +1682,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1574,6 +1699,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1584,6 +1710,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1596,12 +1723,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1614,12 +1743,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1634,6 +1765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1646,6 +1778,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1662,6 +1795,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v16, 
v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv5i64( %va, %b, %m, i32 %evl) ret %v @@ -1674,6 +1808,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.lshr.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -1684,6 +1819,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1696,12 +1832,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1714,12 +1852,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1734,6 +1874,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1746,6 +1887,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsrl.vi v8, v8, 4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 4, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv16i16( %val, %val, i16* %base, i32 %vl) @@ -25,6 +26,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv16i16( %val, %val, i16* %base, %mask, i32 %vl) @@ -41,6 +43,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1i8( %val, %val, i8* %base, i32 %vl) @@ -54,6 +57,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1i8( %val, %val, i8* %base, %mask, i32 %vl) @@ -71,6 +75,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, 
(a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1i8( %val, %val, %val, i8* %base, i32 %vl) @@ -85,6 +90,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1i8( %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -103,6 +109,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1i8( %val, %val, %val, %val, i8* %base, i32 %vl) @@ -118,6 +125,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -137,6 +145,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1i8( %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -153,6 +162,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -173,6 +183,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -190,6 +201,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -211,6 +223,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -229,6 +242,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -251,6 +265,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -270,6 +285,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -286,6 +302,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; 
CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv16i8( %val, %val, i8* %base, i32 %vl) @@ -299,6 +316,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv16i8( %val, %val, i8* %base, %mask, i32 %vl) @@ -316,6 +334,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv16i8( %val, %val, %val, i8* %base, i32 %vl) @@ -330,6 +349,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv16i8( %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -348,6 +368,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv16i8( %val, %val, %val, %val, i8* %base, i32 %vl) @@ -363,6 +384,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv16i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -379,6 +401,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2i32( %val, %val, i32* %base, i32 %vl) @@ -392,6 +415,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2i32( %val, %val, i32* %base, %mask, i32 %vl) @@ -409,6 +433,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2i32( %val, %val, %val, i32* %base, i32 %vl) @@ -423,6 +448,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2i32( %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -441,6 +467,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2i32( %val, %val, %val, %val, i32* %base, i32 %vl) @@ -456,6 +483,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -475,6 +503,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv2i32( 
%val, %val, %val, %val, %val, i32* %base, i32 %vl) @@ -491,6 +520,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -511,6 +541,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, i32 %vl) @@ -528,6 +559,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -549,6 +581,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %vl) @@ -567,6 +600,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -589,6 +623,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %vl) @@ -608,6 +643,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -624,6 +660,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4i16( %val, %val, i16* %base, i32 %vl) @@ -637,6 +674,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4i16( %val, %val, i16* %base, %mask, i32 %vl) @@ -654,6 +692,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv4i16( %val, %val, %val, i16* %base, i32 %vl) @@ -668,6 +707,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv4i16( %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -686,6 +726,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vsseg4.nxv4i16( %val, %val, %val, %val, i16* %base, i32 %vl) @@ -701,6 +742,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv4i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -720,6 +762,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv4i16( %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -736,6 +779,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -756,6 +800,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -773,6 +818,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -794,6 +840,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -812,6 +859,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -834,6 +882,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -853,6 +902,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -869,6 +919,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1i32( %val, %val, i32* %base, i32 %vl) @@ -882,6 +933,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1i32( %val, %val, i32* %base, %mask, i32 %vl) @@ -899,6 +951,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1i32( %val, %val, %val, i32* %base, i32 %vl) @@ -913,6 +966,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1i32( %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -931,6 +985,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1i32( %val, %val, %val, %val, i32* %base, i32 %vl) @@ -946,6 +1001,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -965,6 +1021,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1i32( %val, %val, %val, %val, %val, i32* %base, i32 %vl) @@ -981,6 +1038,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -1001,6 +1059,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, i32 %vl) @@ -1018,6 +1077,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -1039,6 +1099,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %vl) @@ -1057,6 +1118,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -1079,6 +1141,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %vl) @@ -1098,6 +1161,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -1114,6 +1178,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv8i16( %val, %val, i16* %base, i32 %vl) @@ -1127,6 +1192,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv8i16( %val, %val, i16* %base, %mask, i32 %vl) @@ -1144,6 +1210,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv8i16( %val, %val, %val, i16* %base, i32 %vl) @@ -1158,6 +1225,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv8i16( %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -1176,6 +1244,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv8i16( %val, %val, %val, %val, i16* %base, i32 %vl) @@ -1191,6 +1260,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv8i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -1207,6 +1277,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv8i8( %val, %val, i8* %base, i32 %vl) @@ -1220,6 +1291,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv8i8( %val, %val, i8* %base, %mask, i32 %vl) @@ -1237,6 +1309,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv8i8( %val, %val, %val, i8* %base, i32 %vl) @@ -1251,6 +1324,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv8i8( %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1269,6 +1343,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv8i8( %val, %val, %val, %val, i8* %base, i32 %vl) @@ -1284,6 +1359,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv8i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1303,6 +1379,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv8i8( %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -1319,6 +1396,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1339,6 +1417,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -1356,6 +1435,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1377,6 +1457,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -1395,6 +1476,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1417,6 +1499,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -1436,6 +1519,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1452,6 +1536,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv8i32( %val, %val, i32* %base, i32 %vl) @@ -1465,6 +1550,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv8i32( %val, %val, i32* %base, %mask, i32 %vl) @@ -1481,6 +1567,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4i8( %val, %val, i8* %base, i32 %vl) @@ -1494,6 +1581,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4i8( %val, %val, i8* %base, %mask, i32 %vl) @@ -1511,6 +1599,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv4i8( %val, %val, %val, i8* %base, i32 %vl) @@ -1525,6 +1614,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv4i8( %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1543,6 +1633,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv4i8( %val, %val, %val, %val, i8* %base, i32 %vl) @@ -1558,6 +1649,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv4i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1577,6 +1669,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv4i8( %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -1593,6 +1686,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1613,6 +1707,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -1630,6 +1725,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1651,6 +1747,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -1669,6 +1766,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1691,6 +1789,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -1710,6 +1809,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -1726,6 +1826,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; 
CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1i16( %val, %val, i16* %base, i32 %vl) @@ -1739,6 +1840,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1i16( %val, %val, i16* %base, %mask, i32 %vl) @@ -1756,6 +1858,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1i16( %val, %val, %val, i16* %base, i32 %vl) @@ -1770,6 +1873,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1i16( %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -1788,6 +1892,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1i16( %val, %val, %val, %val, i16* %base, i32 %vl) @@ -1803,6 +1908,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -1822,6 +1928,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1i16( %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -1838,6 +1945,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -1858,6 +1966,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -1875,6 +1984,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -1896,6 +2006,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -1914,6 +2025,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -1936,6 +2048,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -1955,6 +2068,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -1971,6 +2085,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv32i8( %val, %val, i8* %base, i32 %vl) @@ -1984,6 +2099,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv32i8( %val, %val, i8* %base, %mask, i32 %vl) @@ -2000,6 +2116,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2i8( %val, %val, i8* %base, i32 %vl) @@ -2013,6 +2130,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2i8( %val, %val, i8* %base, %mask, i32 %vl) @@ -2030,6 +2148,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2i8( %val, %val, %val, i8* %base, i32 %vl) @@ -2044,6 +2163,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2i8( %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -2062,6 +2182,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2i8( %val, %val, %val, %val, i8* %base, i32 %vl) @@ -2077,6 +2198,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2i8( %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -2096,6 +2218,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv2i8( %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -2112,6 +2235,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -2132,6 +2256,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0) +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -2149,6 +2274,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -2170,6 +2296,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -2188,6 +2315,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -2210,6 +2338,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %vl) @@ -2229,6 +2358,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i32 %vl) @@ -2245,6 +2375,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2i16( %val, %val, i16* %base, i32 %vl) @@ -2258,6 +2389,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2i16( %val, %val, i16* %base, %mask, i32 %vl) @@ -2275,6 +2407,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2i16( %val, %val, %val, i16* %base, i32 %vl) @@ -2289,6 +2422,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2i16( %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -2307,6 +2441,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2i16( %val, %val, %val, %val, i16* %base, i32 %vl) @@ -2322,6 +2457,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2i16( %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -2341,6 +2477,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; 
CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv2i16( %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -2357,6 +2494,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -2377,6 +2515,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -2394,6 +2533,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -2415,6 +2555,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -2433,6 +2574,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -2455,6 +2597,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %vl) @@ -2474,6 +2617,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i32 %vl) @@ -2490,6 +2634,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4i32( %val, %val, i32* %base, i32 %vl) @@ -2503,6 +2648,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4i32( %val, %val, i32* %base, %mask, i32 %vl) @@ -2520,6 +2666,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv4i32( %val, %val, %val, i32* %base, i32 %vl) @@ -2534,6 +2681,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv4i32( %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -2552,6 +2700,7 @@ ; CHECK-NEXT: 
vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv4i32( %val, %val, %val, %val, i32* %base, i32 %vl) @@ -2567,6 +2716,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv4i32( %val, %val, %val, %val, i32* %base, %mask, i32 %vl) @@ -2583,6 +2733,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv16f16( %val, %val, half* %base, i32 %vl) @@ -2596,6 +2747,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv16f16( %val, %val, half* %base, %mask, i32 %vl) @@ -2612,6 +2764,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4f64( %val, %val, double* %base, i32 %vl) @@ -2625,6 +2778,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4f64( %val, %val, double* %base, %mask, i32 %vl) @@ -2641,6 +2795,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1f64( %val, %val, double* %base, i32 %vl) @@ -2654,6 +2809,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1f64( %val, %val, double* %base, %mask, i32 %vl) @@ -2671,6 +2827,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1f64( %val, %val, %val, double* %base, i32 %vl) @@ -2685,6 +2842,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1f64( %val, %val, %val, double* %base, %mask, i32 %vl) @@ -2703,6 +2861,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1f64( %val, %val, %val, %val, double* %base, i32 %vl) @@ -2718,6 +2877,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1f64( %val, %val, %val, %val, double* %base, %mask, i32 %vl) @@ -2737,6 +2897,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: 
vsseg5e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1f64( %val, %val, %val, %val, %val, double* %base, i32 %vl) @@ -2753,6 +2914,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) @@ -2773,6 +2935,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg6e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, i32 %vl) @@ -2790,6 +2953,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) @@ -2811,6 +2975,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg7e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, i32 %vl) @@ -2829,6 +2994,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) @@ -2851,6 +3017,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg8e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, i32 %vl) @@ -2870,6 +3037,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i32 %vl) @@ -2886,6 +3054,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2f32( %val, %val, float* %base, i32 %vl) @@ -2899,6 +3068,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2f32( %val, %val, float* %base, %mask, i32 %vl) @@ -2916,6 +3086,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2f32( %val, %val, %val, float* %base, i32 %vl) @@ -2930,6 +3101,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2f32( %val, %val, %val, float* %base, %mask, i32 %vl) @@ -2948,6 +3120,7 @@ ; 
CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2f32( %val, %val, %val, %val, float* %base, i32 %vl) @@ -2963,6 +3136,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl) @@ -2982,6 +3156,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv2f32( %val, %val, %val, %val, %val, float* %base, i32 %vl) @@ -2998,6 +3173,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) @@ -3018,6 +3194,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, i32 %vl) @@ -3035,6 +3212,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) @@ -3056,6 +3234,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %vl) @@ -3074,6 +3253,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) @@ -3096,6 +3276,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %vl) @@ -3115,6 +3296,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) @@ -3131,6 +3313,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1f16( %val, %val, half* %base, i32 %vl) @@ -3144,6 +3327,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsseg2.mask.nxv1f16( %val, %val, half* %base, %mask, i32 %vl) @@ -3161,6 +3345,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1f16( %val, %val, %val, half* %base, i32 %vl) @@ -3175,6 +3360,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1f16( %val, %val, %val, half* %base, %mask, i32 %vl) @@ -3193,6 +3379,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1f16( %val, %val, %val, %val, half* %base, i32 %vl) @@ -3208,6 +3395,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -3227,6 +3415,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1f16( %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -3243,6 +3432,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -3263,6 +3453,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -3280,6 +3471,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -3301,6 +3493,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -3319,6 +3512,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -3341,6 +3535,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -3360,6 +3555,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -3376,6 +3572,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1f32( %val, %val, float* %base, i32 %vl) @@ -3389,6 +3586,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1f32( %val, %val, float* %base, %mask, i32 %vl) @@ -3406,6 +3604,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1f32( %val, %val, %val, float* %base, i32 %vl) @@ -3420,6 +3619,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1f32( %val, %val, %val, float* %base, %mask, i32 %vl) @@ -3438,6 +3638,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1f32( %val, %val, %val, %val, float* %base, i32 %vl) @@ -3453,6 +3654,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl) @@ -3472,6 +3674,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1f32( %val, %val, %val, %val, %val, float* %base, i32 %vl) @@ -3488,6 +3691,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) @@ -3508,6 +3712,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, i32 %vl) @@ -3525,6 +3730,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) @@ -3546,6 +3752,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %vl) @@ -3564,6 +3771,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) @@ -3586,6 +3794,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %vl) @@ -3605,6 +3814,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i32 %vl) @@ -3621,6 +3831,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv8f16( %val, %val, half* %base, i32 %vl) @@ -3634,6 +3845,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv8f16( %val, %val, half* %base, %mask, i32 %vl) @@ -3651,6 +3863,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv8f16( %val, %val, %val, half* %base, i32 %vl) @@ -3665,6 +3878,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv8f16( %val, %val, %val, half* %base, %mask, i32 %vl) @@ -3683,6 +3897,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv8f16( %val, %val, %val, %val, half* %base, i32 %vl) @@ -3698,6 +3913,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv8f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -3714,6 +3930,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv8f32( %val, %val, float* %base, i32 %vl) @@ -3727,6 +3944,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv8f32( %val, %val, float* %base, %mask, i32 %vl) @@ -3743,6 +3961,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2f64( %val, %val, double* %base, i32 %vl) @@ -3756,6 +3975,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; 
CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2f64( %val, %val, double* %base, %mask, i32 %vl) @@ -3773,6 +3993,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2f64( %val, %val, %val, double* %base, i32 %vl) @@ -3787,6 +4008,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2f64( %val, %val, %val, double* %base, %mask, i32 %vl) @@ -3805,6 +4027,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2f64( %val, %val, %val, %val, double* %base, i32 %vl) @@ -3820,6 +4043,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2f64( %val, %val, %val, %val, double* %base, %mask, i32 %vl) @@ -3836,6 +4060,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4f16( %val, %val, half* %base, i32 %vl) @@ -3849,6 +4074,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4f16( %val, %val, half* %base, %mask, i32 %vl) @@ -3866,6 +4092,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv4f16( %val, %val, %val, half* %base, i32 %vl) @@ -3880,6 +4107,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv4f16( %val, %val, %val, half* %base, %mask, i32 %vl) @@ -3898,6 +4126,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv4f16( %val, %val, %val, %val, half* %base, i32 %vl) @@ -3913,6 +4142,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv4f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -3932,6 +4162,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv4f16( %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -3948,6 +4179,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -3968,6 +4200,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -3985,6 +4218,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -4006,6 +4240,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -4024,6 +4259,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -4046,6 +4282,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -4065,6 +4302,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -4081,6 +4319,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2f16( %val, %val, half* %base, i32 %vl) @@ -4094,6 +4333,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2f16( %val, %val, half* %base, %mask, i32 %vl) @@ -4111,6 +4351,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2f16( %val, %val, %val, half* %base, i32 %vl) @@ -4125,6 +4366,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2f16( %val, %val, %val, half* %base, %mask, i32 %vl) @@ -4143,6 +4385,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2f16( %val, %val, %val, %val, half* %base, i32 %vl) @@ -4158,6 +4401,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, 
mf2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2f16( %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -4177,6 +4421,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv2f16( %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -4193,6 +4438,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -4213,6 +4459,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -4230,6 +4477,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -4251,6 +4499,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -4269,6 +4518,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -4291,6 +4541,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %vl) @@ -4310,6 +4561,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i32 %vl) @@ -4326,6 +4578,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4f32( %val, %val, float* %base, i32 %vl) @@ -4339,6 +4592,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4f32( %val, %val, float* %base, %mask, i32 %vl) @@ -4356,6 +4610,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv4f32( %val, %val, %val, float* %base, i32 %vl) 
@@ -4370,6 +4625,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv4f32( %val, %val, %val, float* %base, %mask, i32 %vl) @@ -4388,6 +4644,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv4f32( %val, %val, %val, %val, float* %base, i32 %vl) @@ -4403,6 +4660,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv4f32( %val, %val, %val, %val, float* %base, %mask, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv16i16( %val, %val, i16* %base, i64 %vl) @@ -25,6 +26,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 %vl) @@ -41,6 +43,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4i32( %val, %val, i32* %base, i64 %vl) @@ -54,6 +57,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4i32( %val, %val, i32* %base, %mask, i64 %vl) @@ -71,6 +75,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv4i32( %val, %val, %val, i32* %base, i64 %vl) @@ -85,6 +90,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv4i32( %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -103,6 +109,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv4i32( %val, %val, %val, %val, i32* %base, i64 %vl) @@ -118,6 +125,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv4i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -134,6 +142,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vsseg2.nxv16i8( %val, %val, i8* %base, i64 %vl) @@ -147,6 +156,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv16i8( %val, %val, i8* %base, %mask, i64 %vl) @@ -164,6 +174,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv16i8( %val, %val, %val, i8* %base, i64 %vl) @@ -178,6 +189,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv16i8( %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -196,6 +208,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv16i8( %val, %val, %val, %val, i8* %base, i64 %vl) @@ -211,6 +224,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv16i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -227,6 +241,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1i64( %val, %val, i64* %base, i64 %vl) @@ -240,6 +255,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1i64( %val, %val, i64* %base, %mask, i64 %vl) @@ -257,6 +273,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1i64( %val, %val, %val, i64* %base, i64 %vl) @@ -271,6 +288,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1i64( %val, %val, %val, i64* %base, %mask, i64 %vl) @@ -289,6 +307,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1i64( %val, %val, %val, %val, i64* %base, i64 %vl) @@ -304,6 +323,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1i64( %val, %val, %val, %val, i64* %base, %mask, i64 %vl) @@ -323,6 +343,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg5e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1i64( %val, %val, %val, %val, %val, i64* %base, i64 %vl) @@ -339,6 +360,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) @@ -359,6 +381,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg6e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, i64 %vl) @@ -376,6 +399,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) @@ -397,6 +421,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg7e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, i64 %vl) @@ -415,6 +440,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) @@ -437,6 +463,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg8e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, i64 %vl) @@ -456,6 +483,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %mask, i64 %vl) @@ -472,6 +500,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1i32( %val, %val, i32* %base, i64 %vl) @@ -485,6 +514,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1i32( %val, %val, i32* %base, %mask, i64 %vl) @@ -502,6 +532,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1i32( %val, %val, %val, i32* %base, i64 %vl) @@ -516,6 +547,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1i32( %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -534,6 +566,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1i32( %val, %val, %val, %val, i32* %base, i64 %vl) @@ -549,6 +582,7 @@ ; 
CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -568,6 +602,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1i32( %val, %val, %val, %val, %val, i32* %base, i64 %vl) @@ -584,6 +619,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -604,6 +640,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, i64 %vl) @@ -621,6 +658,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -642,6 +680,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %vl) @@ -660,6 +699,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -682,6 +722,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %vl) @@ -701,6 +742,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -717,6 +759,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv8i16( %val, %val, i16* %base, i64 %vl) @@ -730,6 +773,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv8i16( %val, %val, i16* %base, %mask, i64 %vl) @@ -747,6 +791,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv8i16( %val, %val, 
%val, i16* %base, i64 %vl) @@ -761,6 +806,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv8i16( %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -779,6 +825,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv8i16( %val, %val, %val, %val, i16* %base, i64 %vl) @@ -794,6 +841,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv8i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -810,6 +858,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4i8( %val, %val, i8* %base, i64 %vl) @@ -823,6 +872,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4i8( %val, %val, i8* %base, %mask, i64 %vl) @@ -840,6 +890,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv4i8( %val, %val, %val, i8* %base, i64 %vl) @@ -854,6 +905,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv4i8( %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -872,6 +924,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv4i8( %val, %val, %val, %val, i8* %base, i64 %vl) @@ -887,6 +940,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv4i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -906,6 +960,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv4i8( %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -922,6 +977,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -942,6 +998,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -959,6 +1016,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: 
vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -980,6 +1038,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -998,6 +1057,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -1020,6 +1080,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -1039,6 +1100,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -1055,6 +1117,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1i16( %val, %val, i16* %base, i64 %vl) @@ -1068,6 +1131,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1i16( %val, %val, i16* %base, %mask, i64 %vl) @@ -1085,6 +1149,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1i16( %val, %val, %val, i16* %base, i64 %vl) @@ -1099,6 +1164,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1i16( %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -1117,6 +1183,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1i16( %val, %val, %val, %val, i16* %base, i64 %vl) @@ -1132,6 +1199,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -1151,6 +1219,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1i16( %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -1167,6 +1236,7 @@ ; CHECK-NEXT: 
vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -1187,6 +1257,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -1204,6 +1275,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -1225,6 +1297,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -1243,6 +1316,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -1265,6 +1339,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -1284,6 +1359,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -1300,6 +1376,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2i32( %val, %val, i32* %base, i64 %vl) @@ -1313,6 +1390,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2i32( %val, %val, i32* %base, %mask, i64 %vl) @@ -1330,6 +1408,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2i32( %val, %val, %val, i32* %base, i64 %vl) @@ -1344,6 +1423,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2i32( %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -1362,6 +1442,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2i32( %val, %val, %val, %val, 
i32* %base, i64 %vl) @@ -1377,6 +1458,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2i32( %val, %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -1396,6 +1478,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv2i32( %val, %val, %val, %val, %val, i32* %base, i64 %vl) @@ -1412,6 +1495,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -1432,6 +1516,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, i64 %vl) @@ -1449,6 +1534,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -1470,6 +1556,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %vl) @@ -1488,6 +1575,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -1510,6 +1598,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %vl) @@ -1529,6 +1618,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) @@ -1545,6 +1635,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv8i8( %val, %val, i8* %base, i64 %vl) @@ -1558,6 +1649,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl) @@ -1575,6 +1667,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vsseg3.nxv8i8( %val, %val, %val, i8* %base, i64 %vl) @@ -1589,6 +1682,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv8i8( %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -1607,6 +1701,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv8i8( %val, %val, %val, %val, i8* %base, i64 %vl) @@ -1622,6 +1717,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv8i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -1641,6 +1737,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv8i8( %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -1657,6 +1754,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -1677,6 +1775,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -1694,6 +1793,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -1715,6 +1815,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -1733,6 +1834,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -1755,6 +1857,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -1774,6 +1877,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -1790,6 +1894,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 
; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4i64( %val, %val, i64* %base, i64 %vl) @@ -1803,6 +1908,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4i64( %val, %val, i64* %base, %mask, i64 %vl) @@ -1819,6 +1925,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4i16( %val, %val, i16* %base, i64 %vl) @@ -1832,6 +1939,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4i16( %val, %val, i16* %base, %mask, i64 %vl) @@ -1849,6 +1957,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv4i16( %val, %val, %val, i16* %base, i64 %vl) @@ -1863,6 +1972,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv4i16( %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -1881,6 +1991,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv4i16( %val, %val, %val, %val, i16* %base, i64 %vl) @@ -1896,6 +2007,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv4i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -1915,6 +2027,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv4i16( %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -1931,6 +2044,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -1951,6 +2065,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -1968,6 +2083,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -1989,6 +2105,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsseg7.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -2007,6 +2124,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -2029,6 +2147,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -2048,6 +2167,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -2064,6 +2184,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1i8( %val, %val, i8* %base, i64 %vl) @@ -2077,6 +2198,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1i8( %val, %val, i8* %base, %mask, i64 %vl) @@ -2094,6 +2216,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1i8( %val, %val, %val, i8* %base, i64 %vl) @@ -2108,6 +2231,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1i8( %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2126,6 +2250,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1i8( %val, %val, %val, %val, i8* %base, i64 %vl) @@ -2141,6 +2266,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2160,6 +2286,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1i8( %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -2176,6 +2303,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2196,6 +2324,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail 
call void @llvm.riscv.vsseg6.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -2213,6 +2342,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2234,6 +2364,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -2252,6 +2383,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2274,6 +2406,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -2293,6 +2426,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2309,6 +2443,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2i8( %val, %val, i8* %base, i64 %vl) @@ -2322,6 +2457,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2i8( %val, %val, i8* %base, %mask, i64 %vl) @@ -2339,6 +2475,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2i8( %val, %val, %val, i8* %base, i64 %vl) @@ -2353,6 +2490,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2i8( %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2371,6 +2509,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2i8( %val, %val, %val, %val, i8* %base, i64 %vl) @@ -2386,6 +2525,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2405,6 +2545,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: tail call void @llvm.riscv.vsseg5.nxv2i8( %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -2421,6 +2562,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2441,6 +2583,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -2458,6 +2601,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2479,6 +2623,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -2497,6 +2642,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2519,6 +2665,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) @@ -2538,6 +2685,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) @@ -2554,6 +2702,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv8i32( %val, %val, i32* %base, i64 %vl) @@ -2567,6 +2716,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv8i32( %val, %val, i32* %base, %mask, i64 %vl) @@ -2583,6 +2733,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv32i8( %val, %val, i8* %base, i64 %vl) @@ -2596,6 +2747,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv32i8( %val, %val, i8* %base, %mask, i64 %vl) @@ -2612,6 +2764,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2i16( %val, %val, i16* %base, i64 %vl) @@ -2625,6 +2778,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2i16( %val, %val, i16* %base, %mask, i64 %vl) @@ -2642,6 +2796,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2i16( %val, %val, %val, i16* %base, i64 %vl) @@ -2656,6 +2811,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2i16( %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -2674,6 +2830,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2i16( %val, %val, %val, %val, i16* %base, i64 %vl) @@ -2689,6 +2846,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -2708,6 +2866,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv2i16( %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -2724,6 +2883,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -2744,6 +2904,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -2761,6 +2922,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -2782,6 +2944,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -2800,6 +2963,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -2822,6 +2986,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) @@ -2841,6 +3006,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) @@ -2857,6 +3023,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2i64( %val, %val, i64* %base, i64 %vl) @@ -2870,6 +3037,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl) @@ -2887,6 +3055,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2i64( %val, %val, %val, i64* %base, i64 %vl) @@ -2901,6 +3070,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2i64( %val, %val, %val, i64* %base, %mask, i64 %vl) @@ -2919,6 +3089,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2i64( %val, %val, %val, %val, i64* %base, i64 %vl) @@ -2934,6 +3105,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2i64( %val, %val, %val, %val, i64* %base, %mask, i64 %vl) @@ -2950,6 +3122,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv16f16( %val, %val, half* %base, i64 %vl) @@ -2963,6 +3136,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv16f16( %val, %val, half* %base, %mask, i64 %vl) @@ -2979,6 +3153,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4f64( %val, %val, double* %base, i64 %vl) @@ -2992,6 +3167,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4f64( %val, %val, double* %base, %mask, i64 %vl) @@ -3008,6 +3184,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail 
call void @llvm.riscv.vsseg2.nxv1f64( %val, %val, double* %base, i64 %vl) @@ -3021,6 +3198,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1f64( %val, %val, double* %base, %mask, i64 %vl) @@ -3038,6 +3216,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1f64( %val, %val, %val, double* %base, i64 %vl) @@ -3052,6 +3231,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1f64( %val, %val, %val, double* %base, %mask, i64 %vl) @@ -3070,6 +3250,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1f64( %val, %val, %val, %val, double* %base, i64 %vl) @@ -3085,6 +3266,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1f64( %val, %val, %val, %val, double* %base, %mask, i64 %vl) @@ -3104,6 +3286,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg5e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1f64( %val, %val, %val, %val, %val, double* %base, i64 %vl) @@ -3120,6 +3303,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) @@ -3140,6 +3324,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg6e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, i64 %vl) @@ -3157,6 +3342,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) @@ -3178,6 +3364,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg7e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, i64 %vl) @@ -3196,6 +3383,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) @@ -3218,6 +3406,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg8e64.v v8, (a0) +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, i64 %vl) @@ -3237,6 +3426,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) @@ -3253,6 +3443,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2f32( %val, %val, float* %base, i64 %vl) @@ -3266,6 +3457,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2f32( %val, %val, float* %base, %mask, i64 %vl) @@ -3283,6 +3475,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2f32( %val, %val, %val, float* %base, i64 %vl) @@ -3297,6 +3490,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2f32( %val, %val, %val, float* %base, %mask, i64 %vl) @@ -3315,6 +3509,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2f32( %val, %val, %val, %val, float* %base, i64 %vl) @@ -3330,6 +3525,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl) @@ -3349,6 +3545,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv2f32( %val, %val, %val, %val, %val, float* %base, i64 %vl) @@ -3365,6 +3562,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) @@ -3385,6 +3583,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, i64 %vl) @@ -3402,6 +3601,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) @@ -3423,6 +3623,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; 
CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %vl) @@ -3441,6 +3642,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) @@ -3463,6 +3665,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %vl) @@ -3482,6 +3685,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) @@ -3498,6 +3702,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1f16( %val, %val, half* %base, i64 %vl) @@ -3511,6 +3716,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1f16( %val, %val, half* %base, %mask, i64 %vl) @@ -3528,6 +3734,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1f16( %val, %val, %val, half* %base, i64 %vl) @@ -3542,6 +3749,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1f16( %val, %val, %val, half* %base, %mask, i64 %vl) @@ -3560,6 +3768,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1f16( %val, %val, %val, %val, half* %base, i64 %vl) @@ -3575,6 +3784,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -3594,6 +3804,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1f16( %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -3610,6 +3821,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -3630,6 +3842,7 @@ ; CHECK-NEXT: vmv1r.v 
v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -3647,6 +3860,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -3668,6 +3882,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -3686,6 +3901,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -3708,6 +3924,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -3727,6 +3944,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -3743,6 +3961,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv1f32( %val, %val, float* %base, i64 %vl) @@ -3756,6 +3975,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv1f32( %val, %val, float* %base, %mask, i64 %vl) @@ -3773,6 +3993,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv1f32( %val, %val, %val, float* %base, i64 %vl) @@ -3787,6 +4008,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv1f32( %val, %val, %val, float* %base, %mask, i64 %vl) @@ -3805,6 +4027,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv1f32( %val, %val, %val, %val, float* %base, i64 %vl) @@ -3820,6 +4043,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv1f32( %val, %val, %val, %val, 
float* %base, %mask, i64 %vl) @@ -3839,6 +4063,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv1f32( %val, %val, %val, %val, %val, float* %base, i64 %vl) @@ -3855,6 +4080,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) @@ -3875,6 +4101,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, i64 %vl) @@ -3892,6 +4119,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) @@ -3913,6 +4141,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %vl) @@ -3931,6 +4160,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) @@ -3953,6 +4183,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %vl) @@ -3972,6 +4203,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %mask, i64 %vl) @@ -3988,6 +4220,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv8f16( %val, %val, half* %base, i64 %vl) @@ -4001,6 +4234,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv8f16( %val, %val, half* %base, %mask, i64 %vl) @@ -4018,6 +4252,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv8f16( %val, %val, %val, half* %base, i64 %vl) @@ -4032,6 +4267,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv8f16( %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4050,6 +4286,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv8f16( %val, %val, %val, %val, half* %base, i64 %vl) @@ -4065,6 +4302,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv8f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4081,6 +4319,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv8f32( %val, %val, float* %base, i64 %vl) @@ -4094,6 +4333,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv8f32( %val, %val, float* %base, %mask, i64 %vl) @@ -4110,6 +4350,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2f64( %val, %val, double* %base, i64 %vl) @@ -4123,6 +4364,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2f64( %val, %val, double* %base, %mask, i64 %vl) @@ -4140,6 +4382,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2f64( %val, %val, %val, double* %base, i64 %vl) @@ -4154,6 +4397,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2f64( %val, %val, %val, double* %base, %mask, i64 %vl) @@ -4172,6 +4416,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2f64( %val, %val, %val, %val, double* %base, i64 %vl) @@ -4187,6 +4432,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2f64( %val, %val, %val, %val, double* %base, %mask, i64 %vl) @@ -4203,6 +4449,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4f16( %val, %val, half* %base, i64 %vl) @@ -4216,6 +4463,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4f16( %val, 
%val, half* %base, %mask, i64 %vl) @@ -4233,6 +4481,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv4f16( %val, %val, %val, half* %base, i64 %vl) @@ -4247,6 +4496,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv4f16( %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4265,6 +4515,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv4f16( %val, %val, %val, %val, half* %base, i64 %vl) @@ -4280,6 +4531,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv4f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4299,6 +4551,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv4f16( %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -4315,6 +4568,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4335,6 +4589,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -4352,6 +4607,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4373,6 +4629,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -4391,6 +4648,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4413,6 +4671,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -4432,6 +4691,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: tail call void @llvm.riscv.vsseg8.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4448,6 +4708,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv2f16( %val, %val, half* %base, i64 %vl) @@ -4461,6 +4722,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv2f16( %val, %val, half* %base, %mask, i64 %vl) @@ -4478,6 +4740,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv2f16( %val, %val, %val, half* %base, i64 %vl) @@ -4492,6 +4755,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv2f16( %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4510,6 +4774,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv2f16( %val, %val, %val, %val, half* %base, i64 %vl) @@ -4525,6 +4790,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv2f16( %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4544,6 +4810,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.nxv2f16( %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -4560,6 +4827,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4580,6 +4848,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -4597,6 +4866,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4618,6 +4888,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -4636,6 +4907,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4658,6 +4930,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %vl) @@ -4677,6 +4950,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %mask, i64 %vl) @@ -4693,6 +4967,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv4f32( %val, %val, float* %base, i64 %vl) @@ -4706,6 +4981,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv4f32( %val, %val, float* %base, %mask, i64 %vl) @@ -4723,6 +4999,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.nxv4f32( %val, %val, %val, float* %base, i64 %vl) @@ -4737,6 +5014,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.nxv4f32( %val, %val, %val, float* %base, %mask, i64 %vl) @@ -4755,6 +5033,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.nxv4f32( %val, %val, %val, %val, float* %base, i64 %vl) @@ -4770,6 +5049,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.nxv4f32( %val, %val, %val, %val, float* %base, %mask, i64 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv16i16( %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -25,6 +26,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv16i16( %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -41,6 +43,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vssseg2.nxv1i8( %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -54,6 +57,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1i8( %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -71,6 +75,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1i8( %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -85,6 +90,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1i8( %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -103,6 +109,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1i8( %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -118,6 +125,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1i8( %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -137,6 +145,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1i8( %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -153,6 +162,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -173,6 +183,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -190,6 +201,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -211,6 +223,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -229,6 +242,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -251,6 +265,7 @@ ; CHECK-NEXT: vmv1r.v 
v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -270,6 +285,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -286,6 +302,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv16i8( %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -299,6 +316,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv16i8( %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -316,6 +334,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv16i8( %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -330,6 +349,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv16i8( %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -348,6 +368,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv16i8( %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -363,6 +384,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv16i8( %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -379,6 +401,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2i32( %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -392,6 +415,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2i32( %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -409,6 +433,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2i32( %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -423,6 +448,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vssseg3.mask.nxv2i32( %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -441,6 +467,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2i32( %val, %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -456,6 +483,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2i32( %val, %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -475,6 +503,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv2i32( %val, %val, %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -491,6 +520,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -511,6 +541,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -528,6 +559,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -549,6 +581,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -567,6 +600,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -589,6 +623,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -608,6 +643,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -624,6 +660,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vssseg2.nxv4i16( %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -637,6 +674,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4i16( %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -654,6 +692,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv4i16( %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -668,6 +707,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv4i16( %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -686,6 +726,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv4i16( %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -701,6 +742,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv4i16( %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -720,6 +762,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv4i16( %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -736,6 +779,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -756,6 +800,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -773,6 +818,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -794,6 +840,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -812,6 +859,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ 
-834,6 +882,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -853,6 +902,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -869,6 +919,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1i32( %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -882,6 +933,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1i32( %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -899,6 +951,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1i32( %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -913,6 +966,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1i32( %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -931,6 +985,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1i32( %val, %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -946,6 +1001,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1i32( %val, %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -965,6 +1021,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1i32( %val, %val, %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -981,6 +1038,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -1001,6 +1059,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -1018,6 +1077,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, 
mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -1039,6 +1099,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -1057,6 +1118,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -1079,6 +1141,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -1098,6 +1161,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -1114,6 +1178,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv8i16( %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -1127,6 +1192,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv8i16( %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -1144,6 +1210,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv8i16( %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -1158,6 +1225,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv8i16( %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -1176,6 +1244,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv8i16( %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -1191,6 +1260,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv8i16( %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -1207,6 +1277,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), 
a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv8i8( %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1220,6 +1291,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv8i8( %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1237,6 +1309,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv8i8( %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1251,6 +1324,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv8i8( %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1269,6 +1343,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv8i8( %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1284,6 +1359,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv8i8( %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1303,6 +1379,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv8i8( %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1319,6 +1396,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1339,6 +1417,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1356,6 +1435,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1377,6 +1457,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1395,6 +1476,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv8i8( %val, %val, %val, %val, 
%val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1417,6 +1499,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1436,6 +1519,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1452,6 +1536,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv8i32( %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -1465,6 +1550,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv8i32( %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -1481,6 +1567,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4i8( %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1494,6 +1581,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4i8( %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1511,6 +1599,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv4i8( %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1525,6 +1614,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv4i8( %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1543,6 +1633,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv4i8( %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1558,6 +1649,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv4i8( %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1577,6 +1669,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv4i8( %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1593,6 +1686,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu 
; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1613,6 +1707,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1630,6 +1725,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1651,6 +1747,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1669,6 +1766,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1691,6 +1789,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1710,6 +1809,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -1726,6 +1826,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1i16( %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -1739,6 +1840,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1i16( %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -1756,6 +1858,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1i16( %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -1770,6 +1873,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1i16( %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -1788,6 +1892,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1i16( %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -1803,6 +1908,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1i16( %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -1822,6 +1928,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1i16( %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -1838,6 +1945,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -1858,6 +1966,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -1875,6 +1984,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -1896,6 +2006,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -1914,6 +2025,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -1936,6 +2048,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -1955,6 +2068,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -1971,6 +2085,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv32i8( %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -1984,6 +2099,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv32i8( %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -2000,6 +2116,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2i8( %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -2013,6 +2130,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2i8( %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -2030,6 +2148,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2i8( %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -2044,6 +2163,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv2i8( %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -2062,6 +2182,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2i8( %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -2077,6 +2198,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2i8( %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -2096,6 +2218,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv2i8( %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -2112,6 +2235,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -2132,6 +2256,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -2149,6 +2274,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -2170,6 +2296,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, 
i32 %offset, i32 %vl) @@ -2188,6 +2315,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -2210,6 +2338,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, i32 %vl) @@ -2229,6 +2358,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i32 %offset, %mask, i32 %vl) @@ -2245,6 +2375,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2i16( %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -2258,6 +2389,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2i16( %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -2275,6 +2407,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2i16( %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -2289,6 +2422,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv2i16( %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -2307,6 +2441,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2i16( %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -2322,6 +2457,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2i16( %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -2341,6 +2477,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv2i16( %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -2357,6 +2494,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -2377,6 +2515,7 @@ ; CHECK-NEXT: 
vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -2394,6 +2533,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -2415,6 +2555,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -2433,6 +2574,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -2455,6 +2597,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, i32 %vl) @@ -2474,6 +2617,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i32 %offset, %mask, i32 %vl) @@ -2490,6 +2634,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4i32( %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -2503,6 +2648,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4i32( %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -2520,6 +2666,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv4i32( %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -2534,6 +2681,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv4i32( %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -2552,6 +2700,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv4i32( %val, %val, %val, %val, i32* %base, i32 %offset, i32 %vl) @@ -2567,6 +2716,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli 
zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv4i32( %val, %val, %val, %val, i32* %base, i32 %offset, %mask, i32 %vl) @@ -2583,6 +2733,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv16f16( %val, %val, half* %base, i32 %offset, i32 %vl) @@ -2596,6 +2747,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv16f16( %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -2612,6 +2764,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4f64( %val, %val, double* %base, i32 %offset, i32 %vl) @@ -2625,6 +2778,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4f64( %val, %val, double* %base, i32 %offset, %mask, i32 %vl) @@ -2641,6 +2795,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1f64( %val, %val, double* %base, i32 %offset, i32 %vl) @@ -2654,6 +2809,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1f64( %val, %val, double* %base, i32 %offset, %mask, i32 %vl) @@ -2671,6 +2827,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1f64( %val, %val, %val, double* %base, i32 %offset, i32 %vl) @@ -2685,6 +2842,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1f64( %val, %val, %val, double* %base, i32 %offset, %mask, i32 %vl) @@ -2703,6 +2861,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1f64( %val, %val, %val, %val, double* %base, i32 %offset, i32 %vl) @@ -2718,6 +2877,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1f64( %val, %val, %val, %val, double* %base, i32 %offset, %mask, i32 %vl) @@ -2737,6 +2897,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vssseg5.nxv1f64( %val, %val, %val, %val, %val, double* %base, i32 %offset, i32 %vl) @@ -2753,6 +2914,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, i32 %offset, %mask, i32 %vl) @@ -2773,6 +2935,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, i32 %offset, i32 %vl) @@ -2790,6 +2953,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, i32 %offset, %mask, i32 %vl) @@ -2811,6 +2975,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, i32 %offset, i32 %vl) @@ -2829,6 +2994,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, i32 %offset, %mask, i32 %vl) @@ -2851,6 +3017,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, i32 %offset, i32 %vl) @@ -2870,6 +3037,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, i32 %offset, %mask, i32 %vl) @@ -2886,6 +3054,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2f32( %val, %val, float* %base, i32 %offset, i32 %vl) @@ -2899,6 +3068,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2f32( %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -2916,6 +3086,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2f32( %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -2930,6 +3101,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vssseg3.mask.nxv2f32( %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -2948,6 +3120,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2f32( %val, %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -2963,6 +3136,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2f32( %val, %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -2982,6 +3156,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv2f32( %val, %val, %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -2998,6 +3173,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3018,6 +3194,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -3035,6 +3212,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3056,6 +3234,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -3074,6 +3253,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3096,6 +3276,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -3115,6 +3296,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3131,6 +3313,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1f16( %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3144,6 +3327,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1f16( %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3161,6 +3345,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1f16( %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3175,6 +3360,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1f16( %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3193,6 +3379,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1f16( %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3208,6 +3395,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1f16( %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3227,6 +3415,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1f16( %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3243,6 +3432,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3263,6 +3453,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3280,6 +3471,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3301,6 +3493,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3319,6 +3512,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1f16( 
%val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3341,6 +3535,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3360,6 +3555,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3376,6 +3572,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1f32( %val, %val, float* %base, i32 %offset, i32 %vl) @@ -3389,6 +3586,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1f32( %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3406,6 +3604,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1f32( %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -3420,6 +3619,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1f32( %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3438,6 +3638,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1f32( %val, %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -3453,6 +3654,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1f32( %val, %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3472,6 +3674,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1f32( %val, %val, %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -3488,6 +3691,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3508,6 +3712,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1f32( %val, %val, %val, %val, %val, %val, float* 
%base, i32 %offset, i32 %vl) @@ -3525,6 +3730,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3546,6 +3752,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -3564,6 +3771,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3586,6 +3794,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -3605,6 +3814,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3621,6 +3831,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv8f16( %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3634,6 +3845,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv8f16( %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3651,6 +3863,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv8f16( %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3665,6 +3878,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv8f16( %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3683,6 +3897,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv8f16( %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3698,6 +3913,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv8f16( %val, %val, %val, %val, half* %base, i32 %offset, %mask, 
i32 %vl) @@ -3714,6 +3930,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv8f32( %val, %val, float* %base, i32 %offset, i32 %vl) @@ -3727,6 +3944,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv8f32( %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -3743,6 +3961,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2f64( %val, %val, double* %base, i32 %offset, i32 %vl) @@ -3756,6 +3975,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2f64( %val, %val, double* %base, i32 %offset, %mask, i32 %vl) @@ -3773,6 +3993,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2f64( %val, %val, %val, double* %base, i32 %offset, i32 %vl) @@ -3787,6 +4008,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv2f64( %val, %val, %val, double* %base, i32 %offset, %mask, i32 %vl) @@ -3805,6 +4027,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2f64( %val, %val, %val, %val, double* %base, i32 %offset, i32 %vl) @@ -3820,6 +4043,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2f64( %val, %val, %val, %val, double* %base, i32 %offset, %mask, i32 %vl) @@ -3836,6 +4060,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4f16( %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3849,6 +4074,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4f16( %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3866,6 +4092,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv4f16( %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3880,6 +4107,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv4f16( %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3898,6 +4126,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv4f16( %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3913,6 +4142,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv4f16( %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3932,6 +4162,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv4f16( %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3948,6 +4179,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -3968,6 +4200,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -3985,6 +4218,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -4006,6 +4240,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -4024,6 +4259,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -4046,6 +4282,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -4065,6 +4302,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -4081,6 +4319,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), 
a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2f16( %val, %val, half* %base, i32 %offset, i32 %vl) @@ -4094,6 +4333,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2f16( %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -4111,6 +4351,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2f16( %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -4125,6 +4366,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv2f16( %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -4143,6 +4385,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2f16( %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -4158,6 +4401,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2f16( %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -4177,6 +4421,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv2f16( %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -4193,6 +4438,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -4213,6 +4459,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -4230,6 +4477,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -4251,6 +4499,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -4269,6 +4518,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vssseg7.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -4291,6 +4541,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, i32 %vl) @@ -4310,6 +4561,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i32 %offset, %mask, i32 %vl) @@ -4326,6 +4578,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4f32( %val, %val, float* %base, i32 %offset, i32 %vl) @@ -4339,6 +4592,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4f32( %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -4356,6 +4610,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv4f32( %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -4370,6 +4625,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv4f32( %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) @@ -4388,6 +4644,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv4f32( %val, %val, %val, %val, float* %base, i32 %offset, i32 %vl) @@ -4403,6 +4660,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv4f32( %val, %val, %val, %val, float* %base, i32 %offset, %mask, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll @@ -12,6 +12,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv16i16( %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -25,6 +26,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv16i16( %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -41,6 +43,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; 
CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4i32( %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -54,6 +57,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4i32( %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -71,6 +75,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv4i32( %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -85,6 +90,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv4i32( %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -103,6 +109,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv4i32( %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -118,6 +125,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv4i32( %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -134,6 +142,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv16i8( %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -147,6 +156,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv16i8( %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -164,6 +174,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv16i8( %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -178,6 +189,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv16i8( %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -196,6 +208,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv16i8( %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -211,6 +224,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv16i8( %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -227,6 +241,7 
@@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1i64( %val, %val, i64* %base, i64 %offset, i64 %vl) @@ -240,6 +255,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1i64( %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) @@ -257,6 +273,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1i64( %val, %val, %val, i64* %base, i64 %offset, i64 %vl) @@ -271,6 +288,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1i64( %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) @@ -289,6 +307,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1i64( %val, %val, %val, %val, i64* %base, i64 %offset, i64 %vl) @@ -304,6 +323,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1i64( %val, %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) @@ -323,6 +343,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1i64( %val, %val, %val, %val, %val, i64* %base, i64 %offset, i64 %vl) @@ -339,6 +360,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1i64( %val, %val, %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) @@ -359,6 +381,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, i64 %vl) @@ -376,6 +399,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) @@ -397,6 +421,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, i64 %vl) @@ -415,6 +440,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) @@ -437,6 +463,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, i64 %vl) @@ -456,6 +483,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) @@ -472,6 +500,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1i32( %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -485,6 +514,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1i32( %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -502,6 +532,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1i32( %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -516,6 +547,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1i32( %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -534,6 +566,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1i32( %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -549,6 +582,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1i32( %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -568,6 +602,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1i32( %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -584,6 +619,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -604,6 +640,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vssseg6.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -621,6 +658,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -642,6 +680,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -660,6 +699,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -682,6 +722,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -701,6 +742,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -717,6 +759,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv8i16( %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -730,6 +773,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv8i16( %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -747,6 +791,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv8i16( %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -761,6 +806,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv8i16( %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -779,6 +825,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv8i16( %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -794,6 +841,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv8i16( %val, %val, %val, %val, 
i16* %base, i64 %offset, %mask, i64 %vl) @@ -810,6 +858,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4i8( %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -823,6 +872,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4i8( %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -840,6 +890,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv4i8( %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -854,6 +905,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv4i8( %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -872,6 +924,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv4i8( %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -887,6 +940,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv4i8( %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -906,6 +960,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv4i8( %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -922,6 +977,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -942,6 +998,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -959,6 +1016,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -980,6 +1038,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -998,6 +1057,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: 
vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -1020,6 +1080,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -1039,6 +1100,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -1055,6 +1117,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1i16( %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1068,6 +1131,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1i16( %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1085,6 +1149,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1i16( %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1099,6 +1164,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1i16( %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1117,6 +1183,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1i16( %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1132,6 +1199,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1i16( %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1151,6 +1219,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1i16( %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1167,6 +1236,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1i16( %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1187,6 +1257,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 
; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1204,6 +1275,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1225,6 +1297,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1243,6 +1316,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1265,6 +1339,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1284,6 +1359,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1300,6 +1376,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2i32( %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -1313,6 +1390,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2i32( %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -1330,6 +1408,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2i32( %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -1344,6 +1423,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv2i32( %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -1362,6 +1442,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2i32( %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -1377,6 +1458,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vssseg4.mask.nxv2i32( %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -1396,6 +1478,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv2i32( %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -1412,6 +1495,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv2i32( %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -1432,6 +1516,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -1449,6 +1534,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -1470,6 +1556,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -1488,6 +1575,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -1510,6 +1598,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -1529,6 +1618,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -1545,6 +1635,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv8i8( %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -1558,6 +1649,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv8i8( %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -1575,6 +1667,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vssseg3.nxv8i8( %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -1589,6 +1682,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv8i8( %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -1607,6 +1701,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv8i8( %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -1622,6 +1717,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv8i8( %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -1641,6 +1737,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv8i8( %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -1657,6 +1754,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -1677,6 +1775,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -1694,6 +1793,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -1715,6 +1815,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -1733,6 +1834,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -1755,6 +1857,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -1774,6 +1877,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, 
%val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -1790,6 +1894,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4i64( %val, %val, i64* %base, i64 %offset, i64 %vl) @@ -1803,6 +1908,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4i64( %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) @@ -1819,6 +1925,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4i16( %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1832,6 +1939,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4i16( %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1849,6 +1957,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv4i16( %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1863,6 +1972,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv4i16( %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1881,6 +1991,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv4i16( %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1896,6 +2007,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv4i16( %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1915,6 +2027,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv4i16( %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1931,6 +2044,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1951,6 +2065,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -1968,6 +2083,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; 
CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -1989,6 +2105,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -2007,6 +2124,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -2029,6 +2147,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -2048,6 +2167,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -2064,6 +2184,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1i8( %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2077,6 +2198,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1i8( %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2094,6 +2216,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1i8( %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2108,6 +2231,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1i8( %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2126,6 +2250,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1i8( %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2141,6 +2266,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1i8( %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2160,6 +2286,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1i8( %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2176,6 +2303,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2196,6 +2324,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2213,6 +2342,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2234,6 +2364,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2252,6 +2383,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2274,6 +2406,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2293,6 +2426,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2309,6 +2443,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2i8( %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2322,6 +2457,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2i8( %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2339,6 +2475,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2i8( %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2353,6 +2490,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vssseg3.mask.nxv2i8( %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2371,6 +2509,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2i8( %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2386,6 +2525,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2i8( %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2405,6 +2545,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv2i8( %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2421,6 +2562,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2441,6 +2583,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2458,6 +2601,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2479,6 +2623,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2497,6 +2642,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2519,6 +2665,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2538,6 +2685,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2554,6 +2702,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vssseg2.nxv8i32( %val, %val, i32* %base, i64 %offset, i64 %vl) @@ -2567,6 +2716,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv8i32( %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) @@ -2583,6 +2733,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv32i8( %val, %val, i8* %base, i64 %offset, i64 %vl) @@ -2596,6 +2747,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv32i8( %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) @@ -2612,6 +2764,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2i16( %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -2625,6 +2778,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2i16( %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -2642,6 +2796,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2i16( %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -2656,6 +2811,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv2i16( %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -2674,6 +2830,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2i16( %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -2689,6 +2846,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2i16( %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -2708,6 +2866,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv2i16( %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -2724,6 +2883,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -2744,6 +2904,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, 
e16, mf2, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -2761,6 +2922,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -2782,6 +2944,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -2800,6 +2963,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -2822,6 +2986,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) @@ -2841,6 +3006,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) @@ -2857,6 +3023,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2i64( %val, %val, i64* %base, i64 %offset, i64 %vl) @@ -2870,6 +3037,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2i64( %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) @@ -2887,6 +3055,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2i64( %val, %val, %val, i64* %base, i64 %offset, i64 %vl) @@ -2901,6 +3070,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv2i64( %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) @@ -2919,6 +3089,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2i64( %val, %val, %val, %val, i64* %base, i64 %offset, i64 %vl) @@ -2934,6 +3105,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg4e64.v 
v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2i64( %val, %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) @@ -2950,6 +3122,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv16f16( %val, %val, half* %base, i64 %offset, i64 %vl) @@ -2963,6 +3136,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv16f16( %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -2979,6 +3153,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4f64( %val, %val, double* %base, i64 %offset, i64 %vl) @@ -2992,6 +3167,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4f64( %val, %val, double* %base, i64 %offset, %mask, i64 %vl) @@ -3008,6 +3184,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1f64( %val, %val, double* %base, i64 %offset, i64 %vl) @@ -3021,6 +3198,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1f64( %val, %val, double* %base, i64 %offset, %mask, i64 %vl) @@ -3038,6 +3216,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1f64( %val, %val, %val, double* %base, i64 %offset, i64 %vl) @@ -3052,6 +3231,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1f64( %val, %val, %val, double* %base, i64 %offset, %mask, i64 %vl) @@ -3070,6 +3250,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1f64( %val, %val, %val, %val, double* %base, i64 %offset, i64 %vl) @@ -3085,6 +3266,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1f64( %val, %val, %val, %val, double* %base, i64 %offset, %mask, i64 %vl) @@ -3104,6 +3286,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1f64( %val, %val, %val, %val, %val, double* 
%base, i64 %offset, i64 %vl) @@ -3120,6 +3303,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, i64 %offset, %mask, i64 %vl) @@ -3140,6 +3324,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, i64 %offset, i64 %vl) @@ -3157,6 +3342,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, i64 %offset, %mask, i64 %vl) @@ -3178,6 +3364,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, i64 %offset, i64 %vl) @@ -3196,6 +3383,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, double* %base, i64 %offset, %mask, i64 %vl) @@ -3218,6 +3406,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, i64 %offset, i64 %vl) @@ -3237,6 +3426,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1f64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, i64 %offset, %mask, i64 %vl) @@ -3253,6 +3443,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2f32( %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3266,6 +3457,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2f32( %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3283,6 +3475,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2f32( %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3297,6 +3490,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv2f32( %val, %val, %val, float* 
%base, i64 %offset, %mask, i64 %vl) @@ -3315,6 +3509,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2f32( %val, %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3330,6 +3525,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2f32( %val, %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3349,6 +3545,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv2f32( %val, %val, %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3365,6 +3562,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv2f32( %val, %val, %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3385,6 +3583,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3402,6 +3601,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv2f32( %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3423,6 +3623,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3441,6 +3642,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3463,6 +3665,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3482,6 +3685,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv2f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3498,6 +3702,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1f16( 
%val, %val, half* %base, i64 %offset, i64 %vl) @@ -3511,6 +3716,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1f16( %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -3528,6 +3734,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1f16( %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -3542,6 +3749,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1f16( %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -3560,6 +3768,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1f16( %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -3575,6 +3784,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1f16( %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -3594,6 +3804,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1f16( %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -3610,6 +3821,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1f16( %val, %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -3630,6 +3842,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -3647,6 +3860,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1f16( %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -3668,6 +3882,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -3686,6 +3901,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, 
%mask, i64 %vl) @@ -3708,6 +3924,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -3727,6 +3944,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -3743,6 +3961,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv1f32( %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3756,6 +3975,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv1f32( %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3773,6 +3993,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv1f32( %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3787,6 +4008,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv1f32( %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3805,6 +4027,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv1f32( %val, %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3820,6 +4043,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv1f32( %val, %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3839,6 +4063,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv1f32( %val, %val, %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3855,6 +4080,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv1f32( %val, %val, %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3875,6 +4101,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3892,6 +4119,7 @@ ; CHECK-NEXT: 
vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv1f32( %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3913,6 +4141,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3931,6 +4160,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3953,6 +4183,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -3972,6 +4203,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv1f32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -3988,6 +4220,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv8f16( %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4001,6 +4234,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv8f16( %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4018,6 +4252,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv8f16( %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4032,6 +4267,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv8f16( %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4050,6 +4286,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv8f16( %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4065,6 +4302,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv8f16( %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4081,6 +4319,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; 
CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv8f32( %val, %val, float* %base, i64 %offset, i64 %vl) @@ -4094,6 +4333,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv8f32( %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -4110,6 +4350,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv2f64( %val, %val, double* %base, i64 %offset, i64 %vl) @@ -4123,6 +4364,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2f64( %val, %val, double* %base, i64 %offset, %mask, i64 %vl) @@ -4140,6 +4382,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2f64( %val, %val, %val, double* %base, i64 %offset, i64 %vl) @@ -4154,6 +4397,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv2f64( %val, %val, %val, double* %base, i64 %offset, %mask, i64 %vl) @@ -4172,6 +4416,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2f64( %val, %val, %val, %val, double* %base, i64 %offset, i64 %vl) @@ -4187,6 +4432,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2f64( %val, %val, %val, %val, double* %base, i64 %offset, %mask, i64 %vl) @@ -4203,6 +4449,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4f16( %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4216,6 +4463,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4f16( %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4233,6 +4481,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv4f16( %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4247,6 +4496,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vssseg3.mask.nxv4f16( %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4265,6 +4515,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv4f16( %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4280,6 +4531,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv4f16( %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4299,6 +4551,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv4f16( %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4315,6 +4568,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv4f16( %val, %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4335,6 +4589,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4352,6 +4607,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv4f16( %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4373,6 +4629,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4391,6 +4648,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4413,6 +4671,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4432,6 +4691,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv4f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4448,6 +4708,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: tail call void @llvm.riscv.vssseg2.nxv2f16( %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4461,6 +4722,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv2f16( %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4478,6 +4740,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv2f16( %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4492,6 +4755,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv2f16( %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4510,6 +4774,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv2f16( %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4525,6 +4790,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv2f16( %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4544,6 +4810,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.nxv2f16( %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4560,6 +4827,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg5.mask.nxv2f16( %val, %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4580,6 +4848,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4597,6 +4866,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg6.mask.nxv2f16( %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4618,6 +4888,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.nxv2f16( %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4636,6 +4907,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg7.mask.nxv2f16( %val, %val, %val, 
%val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4658,6 +4930,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, i64 %vl) @@ -4677,6 +4950,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg8.mask.nxv2f16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, i64 %offset, %mask, i64 %vl) @@ -4693,6 +4967,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv4f32( %val, %val, float* %base, i64 %offset, i64 %vl) @@ -4706,6 +4981,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.mask.nxv4f32( %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -4723,6 +4999,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.nxv4f32( %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -4737,6 +5014,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg3.mask.nxv4f32( %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) @@ -4755,6 +5033,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.nxv4f32( %val, %val, %val, %val, float* %base, i64 %offset, i64 %vl) @@ -4770,6 +5049,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg4.mask.nxv4f32( %val, %val, %val, %val, float* %base, i64 %offset, %mask, i64 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv1i8( %va, %b) ret %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -34,6 +36,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 1, i32 0 %vb = shufflevector 
%elt.head, undef, zeroinitializer @@ -48,6 +51,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv2i8( %va, %b) ret %v @@ -58,6 +62,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -71,6 +76,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv4i8( %va, %b) ret %v @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -108,6 +116,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -122,6 +131,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv8i8( %va, %b) ret %v @@ -132,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -145,6 +156,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv16i8( %va, %b) ret %v @@ -169,6 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -182,6 +196,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -196,6 +211,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv32i8( %va, %b) ret %v @@ -206,6 +222,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -219,6 +236,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -233,6 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv64i8( %va, %b) ret %v @@ -243,6 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -256,6 +276,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -270,6 +291,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv1i16( %va, %b) ret %v @@ -280,6 +302,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -293,6 +316,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -307,6 +331,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv2i16( %va, %b) ret %v @@ -317,6 +342,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -330,6 +356,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -344,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv4i16( %va, %b) ret %v @@ -354,6 +382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -367,6 +396,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 1, i32 0 %vb = shufflevector %elt.head, undef, 
zeroinitializer @@ -381,6 +411,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv8i16( %va, %b) ret %v @@ -391,6 +422,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -404,6 +436,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -418,6 +451,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv16i16( %va, %b) ret %v @@ -428,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -441,6 +476,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -455,6 +491,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv32i16( %va, %b) ret %v @@ -465,6 +502,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -478,6 +516,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -492,6 +531,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv1i32( %va, %b) ret %v @@ -502,6 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -515,6 +556,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -529,6 +571,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv2i32( %va, %b) ret %v @@ -539,6 +582,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -552,6 +596,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -566,6 +611,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv4i32( %va, %b) ret %v @@ -576,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -589,6 +636,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -603,6 +651,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv8i32( %va, %b) ret %v @@ -613,6 +662,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -626,6 +676,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -640,6 +691,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv16i32( %va, %b) ret %v @@ -650,6 +702,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -663,6 +716,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -677,6 +731,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv1i64( %va, %b) ret %v @@ -694,12 +749,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssub_nxv1i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vssub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -713,6 +770,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: 
vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -727,6 +785,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv2i64( %va, %b) ret %v @@ -744,12 +803,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssub_nxv2i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vssub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -763,6 +824,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -777,6 +839,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv4i64( %va, %b) ret %v @@ -794,12 +857,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssub_nxv4i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vssub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -813,6 +878,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -827,6 +893,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vssub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv8i64( %va, %b) ret %v @@ -844,12 +911,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssub_nxv8i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vssub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -863,6 +932,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vssub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call 
@llvm.usub.sat.nxv1i8( %va, %b) ret %v @@ -21,6 +22,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -34,6 +36,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -48,6 +51,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv2i8( %va, %b) ret %v @@ -58,6 +62,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -71,6 +76,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv4i8( %va, %b) ret %v @@ -95,6 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -108,6 +116,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -122,6 +131,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv8i8( %va, %b) ret %v @@ -132,6 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -145,6 +156,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv16i8( %va, %b) ret %v @@ -169,6 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -182,6 +196,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; 
CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -196,6 +211,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv32i8( %va, %b) ret %v @@ -206,6 +222,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -219,6 +236,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -233,6 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv64i8( %va, %b) ret %v @@ -243,6 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -256,6 +276,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -270,6 +291,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv1i16( %va, %b) ret %v @@ -280,6 +302,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -293,6 +316,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -307,6 +331,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv2i16( %va, %b) ret %v @@ -317,6 +342,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -330,6 +356,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -344,6 +371,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call 
@llvm.usub.sat.nxv4i16( %va, %b) ret %v @@ -354,6 +382,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -367,6 +396,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -381,6 +411,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv8i16( %va, %b) ret %v @@ -391,6 +422,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -404,6 +436,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -418,6 +451,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv16i16( %va, %b) ret %v @@ -428,6 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -441,6 +476,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -455,6 +491,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv32i16( %va, %b) ret %v @@ -465,6 +502,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -478,6 +516,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -492,6 +531,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv1i32( %va, %b) ret %v @@ -502,6 +542,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -515,6 +556,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; 
CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -529,6 +571,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv2i32( %va, %b) ret %v @@ -539,6 +582,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -552,6 +596,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -566,6 +611,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv4i32( %va, %b) ret %v @@ -576,6 +622,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -589,6 +636,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -603,6 +651,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv8i32( %va, %b) ret %v @@ -613,6 +662,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -626,6 +676,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -640,6 +691,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv16i32( %va, %b) ret %v @@ -650,6 +702,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -663,6 +716,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -677,6 +731,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv1i64( %va, %b) ret %v @@ -694,12 +749,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usub_nxv1i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vssubu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -713,6 +770,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -727,6 +785,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv2i64( %va, %b) ret %v @@ -744,12 +803,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usub_nxv2i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vssubu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -763,6 +824,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -777,6 +839,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv4i64( %va, %b) ret %v @@ -794,12 +857,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usub_nxv4i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vssubu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -813,6 +878,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -827,6 +893,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vssubu.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv8i64( %va, %b) ret %v @@ -844,12 +911,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usub_nxv8i64_vx: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vssubu.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -863,6 +932,7 @@ ; CHECK-NEXT: addi a0, zero, 2 ; CHECK-NEXT: 
vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vssubu.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 2, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -30,6 +32,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -43,6 +46,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %heada = insertelement undef, i8 2, i32 0 %splata = shufflevector %heada, undef, zeroinitializer @@ -57,6 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -67,6 +72,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -80,6 +86,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -92,6 +99,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -102,6 +110,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -115,6 +124,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -127,6 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -137,6 +148,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -150,6 +162,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -162,6 +175,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -172,6 +186,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -185,6 +200,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -197,6 +213,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -207,6 +224,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -220,6 +238,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -232,6 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -242,6 +262,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -255,6 +276,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -267,6 +289,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -277,6 +300,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -290,6 +314,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -302,6 +327,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -312,6 +338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 
0 %splat = shufflevector %head, undef, zeroinitializer @@ -325,6 +352,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -337,6 +365,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -347,6 +376,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -360,6 +390,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -372,6 +403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -382,6 +414,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -395,6 +428,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -407,6 +441,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -417,6 +452,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -430,6 +466,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -442,6 +479,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -452,6 +490,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -465,6 +504,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -477,6 +517,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc 
@@ -487,6 +528,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -500,6 +542,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -512,6 +555,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -522,6 +566,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -535,6 +580,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -547,6 +593,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -557,6 +604,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -570,6 +618,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -582,6 +631,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -592,6 +642,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -605,6 +656,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -617,6 +669,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -627,6 +680,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -640,6 +694,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 1, i32 0 %splat = shufflevector %head, undef, 
zeroinitializer @@ -652,6 +707,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -669,12 +725,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -688,6 +746,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -700,6 +759,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -717,12 +777,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -736,6 +798,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -748,6 +811,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -765,12 +829,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -784,6 +850,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -796,6 +863,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = sub %va, %vb ret %vc @@ -813,12 +881,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -832,6 +902,7 @@ ; CHECK-NEXT: addi a0, zero, 1 ; CHECK-NEXT: vsetvli 
a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -25,6 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -75,6 +80,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -97,6 +104,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -109,6 +117,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -125,6 +134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -135,6 +145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -147,6 +158,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, 
zeroinitializer @@ -159,6 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -175,6 +188,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv5i8( %va, %b, %m, i32 %evl) ret %v @@ -185,6 +199,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -197,6 +212,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -209,6 +225,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -225,6 +242,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -235,6 +253,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -247,6 +266,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -259,6 +279,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -275,6 +296,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -285,6 +307,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -297,6 +320,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -309,6 +333,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -325,6 +350,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; 
CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -335,6 +361,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -347,6 +374,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -359,6 +387,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -375,6 +404,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -385,6 +415,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -397,6 +428,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -409,6 +441,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -425,6 +458,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -435,6 +469,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -447,6 +482,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -459,6 +495,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -475,6 +512,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -485,6 +523,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 
true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -497,6 +536,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -509,6 +549,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -525,6 +566,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -535,6 +577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -547,6 +590,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -559,6 +603,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -575,6 +620,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -585,6 +631,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -597,6 +644,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -609,6 +657,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -625,6 +674,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -635,6 +685,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -647,6 +698,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -659,6 
+711,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -675,6 +728,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -685,6 +739,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -697,6 +752,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -709,6 +765,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -725,6 +782,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -735,6 +793,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -747,6 +806,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -759,6 +819,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -775,6 +836,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -785,6 +847,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -797,6 +860,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -809,6 +873,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -825,6 +890,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; 
CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -835,6 +901,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -847,6 +914,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -859,6 +927,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -875,6 +944,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -885,6 +955,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -897,6 +968,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -909,6 +981,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -925,6 +998,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -935,6 +1009,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -947,6 +1022,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -959,6 +1035,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -975,6 +1052,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -985,6 +1063,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = 
insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1005,12 +1084,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1031,12 +1112,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1053,6 +1136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -1063,6 +1147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1083,12 +1168,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1109,12 +1196,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1131,6 +1220,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -1141,6 +1231,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1161,12 +1252,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1187,12 +1280,14 @@ ; RV32-NEXT: 
vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1209,6 +1304,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.sub.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -1219,6 +1315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1239,12 +1336,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1265,12 +1364,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vsub.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, i32 %vl) @@ -27,6 +28,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -44,6 +46,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, i32 %vl) @@ -58,6 +61,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -74,6 +78,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg2.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, i32 %vl) @@ -87,6 +92,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -104,6 +110,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, i32 %vl) @@ -118,6 +125,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -135,6 +143,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, i32 %vl) @@ -149,6 +158,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -166,6 +176,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, i32 %vl) @@ -180,6 +191,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -197,6 +209,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -211,6 +224,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -228,6 +242,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -242,6 +257,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -259,6 +275,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: 
vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -273,6 +290,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -291,6 +309,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -306,6 +325,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -324,6 +344,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -339,6 +360,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -357,6 +379,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -372,6 +395,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -391,6 +415,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -407,6 +432,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -426,6 +452,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -442,6 +469,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -461,6 +489,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -477,6 +506,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -497,6 +527,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -514,6 +545,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -534,6 +566,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -551,6 +584,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -571,6 +605,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -588,6 +623,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -609,6 +645,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -627,6 +664,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -648,6 +686,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -666,6 +705,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -687,6 +727,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -705,6 +746,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -727,6 +769,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -746,6 +789,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -768,6 +812,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -787,6 +832,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -809,6 +855,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -828,6 +875,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -844,6 +892,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, i32 %vl) @@ -857,6 +906,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; 
CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -874,6 +924,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, i32 %vl) @@ -888,6 +939,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -904,6 +956,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, i32 %vl) @@ -917,6 +970,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -934,6 +988,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -948,6 +1003,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -965,6 +1021,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -979,6 +1036,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -996,6 +1054,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -1010,6 +1069,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -1028,6 +1088,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16( 
%val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -1043,6 +1104,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -1061,6 +1123,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -1076,6 +1139,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -1094,6 +1158,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -1109,6 +1174,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -1126,6 +1192,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, i32 %vl) @@ -1140,6 +1207,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1157,6 +1225,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, i32 %vl) @@ -1171,6 +1240,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1188,6 +1258,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, i32 %vl) @@ -1202,6 +1273,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1219,6 +1291,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1233,6 +1306,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1250,6 +1324,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1264,6 +1339,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1281,6 +1357,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1295,6 +1372,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1313,6 +1391,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1328,6 +1407,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1346,6 +1426,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1361,6 +1442,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1379,6 +1461,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1394,6 +1477,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1413,6 +1497,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1429,6 +1514,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1448,6 +1534,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1464,6 +1551,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1483,6 +1571,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1499,6 +1588,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1519,6 +1609,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1536,6 +1627,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1556,6 +1648,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1573,6 +1666,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1593,6 +1687,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, 
(a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1610,6 +1705,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1631,6 +1727,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1649,6 +1746,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1670,6 +1768,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1688,6 +1787,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1709,6 +1809,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1727,6 +1828,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1749,6 +1851,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1768,6 +1871,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1790,6 +1894,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) 
@@ -1809,6 +1914,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1831,6 +1937,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -1850,6 +1957,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -1867,6 +1975,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, i32 %vl) @@ -1881,6 +1990,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -1898,6 +2008,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, i32 %vl) @@ -1912,6 +2023,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -1928,6 +2040,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, i32 %vl) @@ -1941,6 +2054,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -1958,6 +2072,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -1972,6 +2087,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -1989,6 +2105,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2003,6 +2120,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2020,6 +2138,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2034,6 +2153,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2052,6 +2172,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2067,6 +2188,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2085,6 +2207,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2100,6 +2223,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2118,6 +2242,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2133,6 +2258,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2152,6 +2278,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2168,6 +2295,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), 
v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2187,6 +2315,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2203,6 +2332,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2222,6 +2352,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2238,6 +2369,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2258,6 +2390,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2275,6 +2408,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2295,6 +2429,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2312,6 +2447,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2332,6 +2468,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2349,6 +2486,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2370,6 +2508,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2388,6 +2527,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2409,6 +2549,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2427,6 +2568,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2448,6 +2590,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2466,6 +2609,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2488,6 +2632,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2507,6 +2652,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2529,6 +2675,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2548,6 +2695,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2570,6 +2718,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32( %val, 
%val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -2589,6 +2738,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -2606,6 +2756,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, i32 %vl) @@ -2620,6 +2771,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2637,6 +2789,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, i32 %vl) @@ -2651,6 +2804,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2668,6 +2822,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, i32 %vl) @@ -2682,6 +2837,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2699,6 +2855,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2713,6 +2870,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2730,6 +2888,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2744,6 +2903,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2761,6 +2921,7 @@ ; CHECK-NEXT: 
vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2775,6 +2936,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2793,6 +2955,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2808,6 +2971,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2826,6 +2990,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2841,6 +3006,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2859,6 +3025,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2874,6 +3041,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2893,6 +3061,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2909,6 +3078,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2928,6 +3098,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2944,6 +3115,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2963,6 +3135,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -2979,6 +3152,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -2999,6 +3173,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3016,6 +3191,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3036,6 +3212,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3053,6 +3230,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3073,6 +3251,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3090,6 +3269,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3111,6 +3291,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3129,6 +3310,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3150,6 
+3332,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3168,6 +3351,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3189,6 +3373,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3207,6 +3392,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3229,6 +3415,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3248,6 +3435,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3270,6 +3458,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3289,6 +3478,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3311,6 +3501,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -3330,6 +3521,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -3347,6 +3539,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, i32 %vl) @@ -3361,6 +3554,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3378,6 +3572,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, i32 %vl) @@ -3392,6 +3587,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3408,6 +3604,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, i32 %vl) @@ -3421,6 +3618,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3438,6 +3636,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3452,6 +3651,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3469,6 +3669,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3483,6 +3684,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3500,6 +3702,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3514,6 +3717,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 
%vl) @@ -3532,6 +3736,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3547,6 +3752,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3565,6 +3771,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3580,6 +3787,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3598,6 +3806,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -3613,6 +3822,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -3629,6 +3839,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, i32 %vl) @@ -3642,6 +3853,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3659,6 +3871,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, i32 %vl) @@ -3673,6 +3886,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3689,6 +3903,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, i32 %vl) @@ -3702,6 +3917,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, 
(a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3719,6 +3935,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3733,6 +3950,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3750,6 +3968,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3764,6 +3983,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3781,6 +4001,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3795,6 +4016,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3813,6 +4035,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3828,6 +4051,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3846,6 +4070,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3861,6 +4086,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3879,6 +4105,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32( 
%val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3894,6 +4121,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3913,6 +4141,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3929,6 +4158,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3948,6 +4178,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3964,6 +4195,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -3983,6 +4215,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -3999,6 +4232,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4019,6 +4253,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4036,6 +4271,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4056,6 +4292,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4073,6 +4310,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8( %val, %val, 
%val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4093,6 +4331,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4110,6 +4349,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4131,6 +4371,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4149,6 +4390,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4170,6 +4412,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4188,6 +4431,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4209,6 +4453,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4227,6 +4472,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4249,6 +4495,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4268,6 +4515,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4290,6 +4538,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4309,6 +4558,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4331,6 +4581,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4350,6 +4601,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4367,6 +4619,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, i32 %vl) @@ -4381,6 +4634,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -4398,6 +4652,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, i32 %vl) @@ -4412,6 +4667,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -4429,6 +4685,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, i32 %vl) @@ -4443,6 +4700,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -4460,6 +4718,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, i32 %vl) @@ -4474,6 +4733,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail 
call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4491,6 +4751,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, i32 %vl) @@ -4505,6 +4766,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4521,6 +4783,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, i32 %vl) @@ -4534,6 +4797,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4551,6 +4815,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4565,6 +4830,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4582,6 +4848,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4596,6 +4863,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4613,6 +4881,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4627,6 +4896,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4645,6 +4915,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4660,6 +4931,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli 
zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4678,6 +4950,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4693,6 +4966,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4711,6 +4985,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4726,6 +5001,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4745,6 +5021,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4761,6 +5038,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4780,6 +5058,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4796,6 +5075,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4815,6 +5095,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4831,6 +5112,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4851,6 +5133,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; 
CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4868,6 +5151,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4888,6 +5172,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4905,6 +5190,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4925,6 +5211,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4942,6 +5229,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -4963,6 +5251,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -4981,6 +5270,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5002,6 +5292,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5020,6 +5311,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5041,6 +5333,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5059,6 +5352,7 @@ ; 
CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5081,6 +5375,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5100,6 +5395,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5122,6 +5418,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5141,6 +5438,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5163,6 +5461,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -5182,6 +5481,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5199,6 +5499,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, i32 %vl) @@ -5213,6 +5514,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5230,6 +5532,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, i32 %vl) @@ -5244,6 +5547,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* 
%base, %index, %mask, i32 %vl) @@ -5261,6 +5565,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, i32 %vl) @@ -5275,6 +5580,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5292,6 +5598,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5306,6 +5613,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5323,6 +5631,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5337,6 +5646,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5354,6 +5664,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5368,6 +5679,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5386,6 +5698,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5401,6 +5714,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5419,6 +5733,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5434,6 +5749,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, 
mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5452,6 +5768,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5467,6 +5784,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5486,6 +5804,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5502,6 +5821,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5521,6 +5841,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5537,6 +5858,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5556,6 +5878,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5572,6 +5895,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5592,6 +5916,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5609,6 +5934,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5629,6 +5955,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli 
zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5646,6 +5973,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5666,6 +5994,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5683,6 +6012,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5704,6 +6034,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5722,6 +6053,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5743,6 +6075,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5761,6 +6094,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5782,6 +6116,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5800,6 +6135,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5822,6 +6158,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, 
%val, %val, %val, i16* %base, %index, i32 %vl) @@ -5841,6 +6178,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5863,6 +6201,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5882,6 +6221,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5904,6 +6244,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -5923,6 +6264,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -5939,6 +6281,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, i32 %vl) @@ -5952,6 +6295,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -5969,6 +6313,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, i32 %vl) @@ -5983,6 +6328,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6000,6 +6346,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, i32 %vl) @@ -6014,6 +6361,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6031,6 +6379,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, i32 %vl) @@ -6045,6 +6394,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6062,6 +6412,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, i32 %vl) @@ -6076,6 +6427,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6093,6 +6445,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6107,6 +6460,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6124,6 +6478,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6138,6 +6493,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6155,6 +6511,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6169,6 +6526,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6187,6 +6545,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6202,6 +6561,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, 
mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6220,6 +6580,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6235,6 +6596,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6253,6 +6615,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6268,6 +6631,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6287,6 +6651,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6303,6 +6668,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6322,6 +6688,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6338,6 +6705,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6357,6 +6725,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6373,6 +6742,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6393,6 +6763,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v 
v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6410,6 +6781,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6430,6 +6802,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6447,6 +6820,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6467,6 +6841,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6484,6 +6859,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6505,6 +6881,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6523,6 +6900,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6544,6 +6922,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6562,6 +6941,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6583,6 +6963,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6601,6 +6982,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6623,6 +7005,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6642,6 +7025,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6664,6 +7048,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6683,6 +7068,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6705,6 +7091,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i32 %vl) @@ -6724,6 +7111,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) @@ -6741,6 +7129,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, i32 %vl) @@ -6755,6 +7144,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6772,6 +7162,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, i32 %vl) @@ -6786,6 +7177,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl) 
@@ -6803,6 +7195,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, i32 %vl) @@ -6817,6 +7210,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6834,6 +7228,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6848,6 +7243,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6865,6 +7261,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6879,6 +7276,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6896,6 +7294,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6910,6 +7309,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6928,6 +7328,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6943,6 +7344,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6961,6 +7363,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -6976,6 +7379,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: 
vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -6994,6 +7398,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7009,6 +7414,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7028,6 +7434,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7044,6 +7451,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7063,6 +7471,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7079,6 +7488,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7098,6 +7508,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7114,6 +7525,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7134,6 +7546,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7151,6 +7564,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7171,6 +7585,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, 
mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7188,6 +7603,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7208,6 +7624,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7225,6 +7642,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7246,6 +7664,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7264,6 +7683,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7285,6 +7705,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7303,6 +7724,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7324,6 +7746,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7342,6 +7765,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7364,6 +7788,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* 
%base, %index, i32 %vl) @@ -7383,6 +7808,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7405,6 +7831,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7424,6 +7851,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7446,6 +7874,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i32 %vl) @@ -7465,6 +7894,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) @@ -7482,6 +7912,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, i32 %vl) @@ -7496,6 +7927,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7513,6 +7945,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, i32 %vl) @@ -7527,6 +7960,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7544,6 +7978,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, i32 %vl) @@ -7558,6 +7993,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32( %val, %val, 
i32* %base, %index, %mask, i32 %vl) @@ -7575,6 +8011,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7589,6 +8026,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7606,6 +8044,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7620,6 +8059,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7637,6 +8077,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7651,6 +8092,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7669,6 +8111,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7684,6 +8127,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7702,6 +8146,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7717,6 +8162,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7735,6 +8181,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i32 %vl) @@ -7750,6 +8197,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; 
CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) @@ -7767,6 +8215,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16( %val, %val, half* %base, %index, i32 %vl) @@ -7781,6 +8230,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -7798,6 +8248,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8( %val, %val, half* %base, %index, i32 %vl) @@ -7812,6 +8263,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -7828,6 +8280,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32( %val, %val, half* %base, %index, i32 %vl) @@ -7841,6 +8294,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -7858,6 +8312,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16( %val, %val, double* %base, %index, i32 %vl) @@ -7872,6 +8327,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -7889,6 +8345,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8( %val, %val, double* %base, %index, i32 %vl) @@ -7903,6 +8360,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -7920,6 +8378,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32( %val, %val, double* %base, %index, i32 %vl) @@ -7934,6 +8393,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -7951,6 +8411,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8( %val, %val, double* %base, %index, i32 %vl) @@ -7965,6 +8426,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -7982,6 +8444,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32( %val, %val, double* %base, %index, i32 %vl) @@ -7996,6 +8459,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8013,6 +8477,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16( %val, %val, double* %base, %index, i32 %vl) @@ -8027,6 +8492,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8044,6 +8510,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, i32 %vl) @@ -8058,6 +8525,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8075,6 +8543,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, i32 %vl) @@ -8089,6 +8558,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 
%vl) @@ -8106,6 +8576,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, i32 %vl) @@ -8120,6 +8591,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8138,6 +8610,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8153,6 +8626,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8171,6 +8645,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8186,6 +8661,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8204,6 +8680,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8219,6 +8696,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8238,6 +8716,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8254,6 +8733,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8273,6 +8753,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8289,6 +8770,7 @@ ; 
CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8308,6 +8790,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8324,6 +8807,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8344,6 +8828,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8361,6 +8846,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8381,6 +8867,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8398,6 +8885,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8418,6 +8906,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8435,6 +8924,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8456,6 +8946,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8474,6 +8965,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8( 
%val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8495,6 +8987,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8513,6 +9006,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8534,6 +9028,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8552,6 +9047,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8574,6 +9070,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8593,6 +9090,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8615,6 +9113,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8634,6 +9133,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8656,6 +9156,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -8675,6 +9176,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -8692,6 +9194,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32( %val, %val, float* %base, %index, i32 %vl) @@ -8706,6 +9209,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8723,6 +9227,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8( %val, %val, float* %base, %index, i32 %vl) @@ -8737,6 +9242,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8754,6 +9260,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16( %val, %val, float* %base, %index, i32 %vl) @@ -8768,6 +9275,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8785,6 +9293,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, i32 %vl) @@ -8799,6 +9308,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8816,6 +9326,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, i32 %vl) @@ -8830,6 +9341,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8847,6 +9359,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, i32 %vl) @@ -8861,6 +9374,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8879,6 +9393,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -8894,6 +9409,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8912,6 +9428,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -8927,6 +9444,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8945,6 +9463,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -8960,6 +9479,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -8979,6 +9499,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -8995,6 +9516,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9014,6 +9536,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9030,6 +9553,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9049,6 +9573,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9065,6 +9590,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9085,6 +9611,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9102,6 +9629,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9122,6 +9650,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9139,6 +9668,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9159,6 +9689,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9176,6 +9707,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9197,6 +9729,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9215,6 +9748,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9236,6 +9770,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9254,6 +9789,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: 
vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9275,6 +9811,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9293,6 +9830,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9315,6 +9853,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9334,6 +9873,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9356,6 +9896,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9375,6 +9916,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9397,6 +9939,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -9416,6 +9959,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -9433,6 +9977,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8( %val, %val, half* %base, %index, i32 %vl) @@ -9447,6 +9992,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* 
%base, %index, %mask, i32 %vl) @@ -9464,6 +10010,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32( %val, %val, half* %base, %index, i32 %vl) @@ -9478,6 +10025,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9495,6 +10043,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16( %val, %val, half* %base, %index, i32 %vl) @@ -9509,6 +10058,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9526,6 +10076,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, i32 %vl) @@ -9540,6 +10091,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9557,6 +10109,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, i32 %vl) @@ -9571,6 +10124,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9588,6 +10142,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, i32 %vl) @@ -9602,6 +10157,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9620,6 +10176,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9635,6 +10192,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, 
e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9653,6 +10211,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9668,6 +10227,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9686,6 +10246,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9701,6 +10262,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9720,6 +10282,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9736,6 +10299,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9755,6 +10319,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9771,6 +10336,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9790,6 +10356,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9806,6 +10373,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9826,6 +10394,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9843,6 +10412,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9863,6 +10433,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9880,6 +10451,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9900,6 +10472,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9917,6 +10490,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9938,6 +10512,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9956,6 +10531,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -9977,6 +10553,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -9995,6 +10572,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10016,6 +10594,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16( %val, %val, 
%val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10034,6 +10613,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10056,6 +10636,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10075,6 +10656,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10097,6 +10679,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10116,6 +10699,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10138,6 +10722,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -10157,6 +10742,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10174,6 +10760,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8( %val, %val, float* %base, %index, i32 %vl) @@ -10188,6 +10775,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10205,6 +10793,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32( %val, %val, float* %base, %index, i32 %vl) @@ -10219,6 +10808,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10236,6 +10826,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16( %val, %val, float* %base, %index, i32 %vl) @@ -10250,6 +10841,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10267,6 +10859,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, i32 %vl) @@ -10281,6 +10874,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10298,6 +10892,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, i32 %vl) @@ -10312,6 +10907,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10329,6 +10925,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, i32 %vl) @@ -10343,6 +10940,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10361,6 +10959,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10376,6 +10975,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10394,6 +10994,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10409,6 +11010,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10427,6 +11029,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10442,6 +11045,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10461,6 +11065,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10477,6 +11082,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10496,6 +11102,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10512,6 +11119,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10531,6 +11139,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10547,6 +11156,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10567,6 +11177,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10584,6 +11195,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10604,6 +11216,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10621,6 +11234,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10641,6 +11255,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10658,6 +11273,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10679,6 +11295,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10697,6 +11314,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10718,6 +11336,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10736,6 +11355,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10757,6 +11377,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10775,6 +11396,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10797,6 
+11419,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10816,6 +11439,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10838,6 +11462,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10857,6 +11482,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10879,6 +11505,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -10898,6 +11525,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -10915,6 +11543,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16( %val, %val, half* %base, %index, i32 %vl) @@ -10929,6 +11558,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10946,6 +11576,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8( %val, %val, half* %base, %index, i32 %vl) @@ -10960,6 +11591,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -10976,6 +11608,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg2.nxv8f16.nxv8i32( %val, %val, half* %base, %index, i32 %vl) @@ -10989,6 +11622,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11006,6 +11640,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11020,6 +11655,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11037,6 +11673,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11051,6 +11688,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11068,6 +11706,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11082,6 +11721,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11100,6 +11740,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11115,6 +11756,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11133,6 +11775,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11148,6 +11791,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, 
%index, %mask, i32 %vl) @@ -11166,6 +11810,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11181,6 +11826,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11198,6 +11844,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16( %val, %val, float* %base, %index, i32 %vl) @@ -11212,6 +11859,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -11229,6 +11877,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8( %val, %val, float* %base, %index, i32 %vl) @@ -11243,6 +11892,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -11260,6 +11910,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32( %val, %val, float* %base, %index, i32 %vl) @@ -11274,6 +11925,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -11291,6 +11943,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32( %val, %val, double* %base, %index, i32 %vl) @@ -11305,6 +11958,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11322,6 +11976,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8( %val, %val, double* %base, %index, i32 %vl) @@ -11336,6 +11991,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, 
m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11353,6 +12009,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16( %val, %val, double* %base, %index, i32 %vl) @@ -11367,6 +12024,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11384,6 +12042,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, i32 %vl) @@ -11398,6 +12057,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11415,6 +12075,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, i32 %vl) @@ -11429,6 +12090,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11446,6 +12108,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, i32 %vl) @@ -11460,6 +12123,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11478,6 +12142,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -11493,6 +12158,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11511,6 +12177,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -11526,6 +12193,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11544,6 +12212,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i32 %vl) @@ -11559,6 +12228,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) @@ -11576,6 +12246,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16( %val, %val, half* %base, %index, i32 %vl) @@ -11590,6 +12261,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11607,6 +12279,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8( %val, %val, half* %base, %index, i32 %vl) @@ -11621,6 +12294,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11637,6 +12311,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32( %val, %val, half* %base, %index, i32 %vl) @@ -11650,6 +12325,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11667,6 +12343,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11681,6 +12358,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11698,6 +12376,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11712,6 +12391,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11729,6 +12409,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, i32 %vl) @@ -11743,6 +12424,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11761,6 +12443,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11776,6 +12459,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11794,6 +12478,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11809,6 +12494,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11827,6 +12513,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11842,6 +12529,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11861,6 +12549,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16( %val, %val, 
%val, %val, %val, half* %base, %index, i32 %vl) @@ -11877,6 +12566,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11896,6 +12586,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11912,6 +12603,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11931,6 +12623,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11947,6 +12640,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -11967,6 +12661,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -11984,6 +12679,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12004,6 +12700,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12021,6 +12718,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12041,6 +12739,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12058,6 +12757,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: 
tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12079,6 +12779,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12097,6 +12798,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12118,6 +12820,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12136,6 +12839,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12157,6 +12861,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12175,6 +12880,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12197,6 +12903,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12216,6 +12923,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12238,6 +12946,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12257,6 +12966,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12279,6 
+12989,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12298,6 +13009,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12315,6 +13027,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32( %val, %val, half* %base, %index, i32 %vl) @@ -12329,6 +13042,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12346,6 +13060,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8( %val, %val, half* %base, %index, i32 %vl) @@ -12360,6 +13075,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12377,6 +13093,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16( %val, %val, half* %base, %index, i32 %vl) @@ -12391,6 +13108,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12408,6 +13126,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, i32 %vl) @@ -12422,6 +13141,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12439,6 +13159,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, i32 %vl) @@ -12453,6 +13174,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli 
zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12470,6 +13192,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, i32 %vl) @@ -12484,6 +13207,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12502,6 +13226,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12517,6 +13242,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12535,6 +13261,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12550,6 +13277,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12568,6 +13296,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12583,6 +13312,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12602,6 +13332,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12618,6 +13349,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12637,6 +13369,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, 
mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12653,6 +13386,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12672,6 +13406,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12688,6 +13423,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12708,6 +13444,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12725,6 +13462,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12745,6 +13483,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12762,6 +13501,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12782,6 +13522,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12799,6 +13540,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12820,6 +13562,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ 
-12838,6 +13581,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12859,6 +13603,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12877,6 +13622,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12898,6 +13644,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12916,6 +13663,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12938,6 +13686,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12957,6 +13706,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -12979,6 +13729,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -12998,6 +13749,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -13020,6 +13772,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i32 %vl) @@ -13039,6 +13792,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, 
(a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) @@ -13056,6 +13810,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16( %val, %val, float* %base, %index, i32 %vl) @@ -13070,6 +13825,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13087,6 +13843,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8( %val, %val, float* %base, %index, i32 %vl) @@ -13101,6 +13858,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13118,6 +13876,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32( %val, %val, float* %base, %index, i32 %vl) @@ -13132,6 +13891,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13149,6 +13909,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13163,6 +13924,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13180,6 +13942,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13194,6 +13957,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13211,6 +13975,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, i32 %vl) @@ -13225,6 +13990,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13243,6 +14009,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -13258,6 +14025,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13276,6 +14044,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -13291,6 +14060,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) @@ -13309,6 +14079,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i32 %vl) @@ -13324,6 +14095,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, i64 %vl) @@ -27,6 +28,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -44,6 +46,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, i64 %vl) @@ -58,6 +61,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, 
a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -74,6 +78,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, i64 %vl) @@ -87,6 +92,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -104,6 +110,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, i64 %vl) @@ -118,6 +125,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -135,6 +143,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, i64 %vl) @@ -149,6 +158,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -165,6 +175,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, i64 %vl) @@ -178,6 +189,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -195,6 +207,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, i64 %vl) @@ -209,6 +222,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -226,6 +240,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32( %val, 
%val, %val, i32* %base, %index, i64 %vl) @@ -240,6 +255,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -257,6 +273,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -271,6 +288,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -288,6 +306,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -302,6 +321,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -319,6 +339,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -333,6 +354,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -351,6 +373,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -366,6 +389,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -384,6 +408,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -399,6 +424,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -417,6 +443,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: 
vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -432,6 +459,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -450,6 +478,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -465,6 +494,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -481,6 +511,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, i64 %vl) @@ -494,6 +525,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -511,6 +543,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, i64 %vl) @@ -525,6 +558,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -541,6 +575,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, i64 %vl) @@ -554,6 +589,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -571,6 +607,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -585,6 +622,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -602,6 +640,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -616,6 +655,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -633,6 +673,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -647,6 +688,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -665,6 +707,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -680,6 +723,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -698,6 +742,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -713,6 +758,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -731,6 +777,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -746,6 +793,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -763,6 +811,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, i64 %vl) @@ -777,6 +826,7 @@ ; 
CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -794,6 +844,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, i64 %vl) @@ -808,6 +859,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -825,6 +877,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, i64 %vl) @@ -839,6 +892,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -856,6 +910,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, i64 %vl) @@ -870,6 +925,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -887,6 +943,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -901,6 +958,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -918,6 +976,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -932,6 +991,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -949,6 +1009,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret 
entry: tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -963,6 +1024,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -980,6 +1042,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -994,6 +1057,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1012,6 +1076,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1027,6 +1092,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1045,6 +1111,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1060,6 +1127,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1078,6 +1146,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1093,6 +1162,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1111,6 +1181,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1126,6 +1197,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* 
%base, %index, %mask, i64 %vl) @@ -1145,6 +1217,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1161,6 +1234,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1180,6 +1254,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1196,6 +1271,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1215,6 +1291,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1231,6 +1308,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1250,6 +1328,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1266,6 +1345,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1286,6 +1366,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1303,6 +1384,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1323,6 +1405,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32( %val, %val, %val, 
%val, %val, %val, i64* %base, %index, i64 %vl) @@ -1340,6 +1423,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1360,6 +1444,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1377,6 +1462,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1397,6 +1483,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1414,6 +1501,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1435,6 +1523,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1453,6 +1542,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1474,6 +1564,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1492,6 +1583,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1513,6 +1605,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1531,6 +1624,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 
0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1552,6 +1646,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1570,6 +1665,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1592,6 +1688,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1611,6 +1708,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1633,6 +1731,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1652,6 +1751,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1674,6 +1774,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1693,6 +1794,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -1715,6 +1817,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -1734,6 +1837,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, 
i64 %vl) @@ -1751,6 +1855,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, i64 %vl) @@ -1765,6 +1870,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1782,6 +1888,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, i64 %vl) @@ -1796,6 +1903,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1813,6 +1921,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, i64 %vl) @@ -1827,6 +1936,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1844,6 +1954,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, i64 %vl) @@ -1858,6 +1969,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1875,6 +1987,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1889,6 +2002,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1906,6 +2020,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1920,6 +2035,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1937,6 +2053,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1951,6 +2068,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -1968,6 +2086,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -1982,6 +2101,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2000,6 +2120,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2015,6 +2136,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2033,6 +2155,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2048,6 +2171,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2066,6 +2190,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2081,6 +2206,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2099,6 +2225,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2114,6 +2241,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2133,6 +2261,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2149,6 +2278,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2168,6 +2298,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2184,6 +2315,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2203,6 +2335,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2219,6 +2352,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2238,6 +2372,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2254,6 +2389,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2274,6 +2410,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2291,6 +2428,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call 
void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2311,6 +2449,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2328,6 +2467,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2348,6 +2488,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2365,6 +2506,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2385,6 +2527,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2402,6 +2545,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2423,6 +2567,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2441,6 +2586,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2462,6 +2608,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2480,6 +2627,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2501,6 +2649,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; 
CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2519,6 +2668,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2540,6 +2690,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2558,6 +2709,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2580,6 +2732,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2599,6 +2752,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2621,6 +2775,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2640,6 +2795,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2662,6 +2818,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2681,6 +2838,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2703,6 +2861,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8( 
%val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -2722,6 +2881,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -2739,6 +2899,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, i64 %vl) @@ -2753,6 +2914,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2770,6 +2932,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, i64 %vl) @@ -2784,6 +2947,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2800,6 +2964,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, i64 %vl) @@ -2813,6 +2978,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2829,6 +2995,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, i64 %vl) @@ -2842,6 +3009,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2859,6 +3027,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -2873,6 +3042,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2890,6 +3060,7 @@ ; CHECK-NEXT: 
vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -2904,6 +3075,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2921,6 +3093,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -2935,6 +3108,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2952,6 +3126,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -2966,6 +3141,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -2984,6 +3160,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -2999,6 +3176,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -3017,6 +3195,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -3032,6 +3211,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -3050,6 +3230,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -3065,6 +3246,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v 
v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -3083,6 +3265,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -3098,6 +3281,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -3114,6 +3298,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, i64 %vl) @@ -3127,6 +3312,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3144,6 +3330,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, i64 %vl) @@ -3158,6 +3345,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3174,6 +3362,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, i64 %vl) @@ -3187,6 +3376,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3204,6 +3394,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, i64 %vl) @@ -3218,6 +3409,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3235,6 +3427,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32( %val, %val, %val, i8* 
%base, %index, i64 %vl) @@ -3249,6 +3442,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3266,6 +3460,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3280,6 +3475,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3297,6 +3493,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3311,6 +3508,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3328,6 +3526,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3342,6 +3541,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3360,6 +3560,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3375,6 +3576,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3393,6 +3595,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3408,6 +3611,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3426,6 +3630,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, 
mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3441,6 +3646,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3459,6 +3665,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3474,6 +3681,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3493,6 +3701,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3509,6 +3718,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3528,6 +3738,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3544,6 +3755,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3563,6 +3775,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3579,6 +3792,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3598,6 +3812,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3614,6 +3829,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v 
v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3634,6 +3850,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3651,6 +3868,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3671,6 +3889,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3688,6 +3907,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3708,6 +3928,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3725,6 +3946,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3745,6 +3967,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3762,6 +3985,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3783,6 +4007,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3801,6 +4026,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3822,6 +4048,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3840,6 +4067,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3861,6 +4089,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3879,6 +4108,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3900,6 +4130,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3918,6 +4149,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3940,6 +4172,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -3959,6 +4192,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -3981,6 +4215,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -4000,6 +4235,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -4022,6 +4258,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg8.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -4041,6 +4278,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -4063,6 +4301,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -4082,6 +4321,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -4099,6 +4339,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, i64 %vl) @@ -4113,6 +4354,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4130,6 +4372,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, i64 %vl) @@ -4144,6 +4387,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4161,6 +4405,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, i64 %vl) @@ -4175,6 +4420,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4192,6 +4438,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, i64 %vl) @@ -4206,6 +4453,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8( 
%val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4223,6 +4471,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4237,6 +4486,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4254,6 +4504,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4268,6 +4519,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4285,6 +4537,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4299,6 +4552,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4316,6 +4570,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4330,6 +4585,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4348,6 +4604,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4363,6 +4620,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4381,6 +4639,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4396,6 +4655,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4414,6 +4674,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4429,6 +4690,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4447,6 +4709,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4462,6 +4725,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4481,6 +4745,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4497,6 +4762,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4516,6 +4782,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4532,6 +4799,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4551,6 +4819,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4567,6 +4836,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4586,6 +4856,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4602,6 +4873,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4622,6 +4894,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4639,6 +4912,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4659,6 +4933,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4676,6 +4951,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4696,6 +4972,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4713,6 +4990,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4733,6 +5011,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4750,6 +5029,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4771,6 +5051,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, 
%index, i64 %vl) @@ -4789,6 +5070,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4810,6 +5092,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4828,6 +5111,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4849,6 +5133,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4867,6 +5152,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4888,6 +5174,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4906,6 +5193,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4928,6 +5216,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4947,6 +5236,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -4969,6 +5259,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -4988,6 +5279,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -5010,6 +5302,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -5029,6 +5322,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -5051,6 +5345,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -5070,6 +5365,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -5087,6 +5383,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, i64 %vl) @@ -5101,6 +5398,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5118,6 +5416,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, i64 %vl) @@ -5132,6 +5431,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5149,6 +5449,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, i64 %vl) @@ -5163,6 +5464,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5179,6 +5481,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, i64 %vl) @@ -5192,6 +5495,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5209,6 +5513,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5223,6 +5528,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5240,6 +5546,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5254,6 +5561,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5271,6 +5579,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5285,6 +5594,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5302,6 +5612,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5316,6 +5627,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5334,6 +5646,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5349,6 +5662,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32( %val, %val, 
%val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5367,6 +5681,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5382,6 +5697,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5400,6 +5716,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5415,6 +5732,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5433,6 +5751,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5448,6 +5767,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5467,6 +5787,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5483,6 +5804,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5502,6 +5824,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5518,6 +5841,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5537,6 +5861,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, 
i64 %vl) @@ -5553,6 +5878,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5572,6 +5898,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5588,6 +5915,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5608,6 +5936,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5625,6 +5954,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5645,6 +5975,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5662,6 +5993,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5682,6 +6014,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5699,6 +6032,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5719,6 +6053,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5736,6 +6071,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5757,6 +6093,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5775,6 +6112,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5796,6 +6134,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5814,6 +6153,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5835,6 +6175,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5853,6 +6194,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5874,6 +6216,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5892,6 +6235,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5914,6 +6258,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5933,6 +6278,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5955,6 +6301,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -5974,6 +6321,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -5996,6 +6344,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -6015,6 +6364,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -6037,6 +6387,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) @@ -6056,6 +6407,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -6072,6 +6424,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, i64 %vl) @@ -6085,6 +6438,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6102,6 +6456,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, i64 %vl) @@ -6116,6 +6471,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6132,6 +6488,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, i64 %vl) @@ -6145,6 +6502,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6161,6 +6519,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, i64 %vl) @@ -6174,6 +6533,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6191,6 +6551,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6205,6 +6566,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6222,6 +6584,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6236,6 +6599,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6253,6 +6617,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6267,6 +6632,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6284,6 +6650,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6298,6 +6665,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6316,6 +6684,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail 
call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6331,6 +6700,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6349,6 +6719,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6364,6 +6735,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6382,6 +6754,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6397,6 +6770,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6415,6 +6789,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6430,6 +6805,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6449,6 +6825,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6465,6 +6842,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6484,6 +6862,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6500,6 +6879,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, 
%val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6519,6 +6899,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6535,6 +6916,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6554,6 +6936,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6570,6 +6953,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6590,6 +6974,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6607,6 +6992,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6627,6 +7013,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6644,6 +7031,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6664,6 +7052,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6681,6 +7070,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6701,6 +7091,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6718,6 +7109,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6739,6 +7131,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6757,6 +7150,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6778,6 +7172,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6796,6 +7191,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6817,6 +7213,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6835,6 +7232,7 @@ ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6856,6 +7254,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6874,6 +7273,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6896,6 +7296,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6915,6 +7316,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: 
vsuxseg8ei16.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6937,6 +7339,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6956,6 +7359,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -6978,6 +7382,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -6997,6 +7402,7 @@ ; CHECK-NEXT: vmv1r.v v15, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -7019,6 +7425,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -7038,6 +7445,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -7055,6 +7463,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, i64 %vl) @@ -7069,6 +7478,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -7086,6 +7496,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, i64 %vl) @@ -7100,6 +7511,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -7117,6 +7529,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; 
CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, i64 %vl) @@ -7131,6 +7544,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -7148,6 +7562,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, i64 %vl) @@ -7162,6 +7577,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -7178,6 +7594,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, i64 %vl) @@ -7191,6 +7608,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7208,6 +7626,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, i64 %vl) @@ -7222,6 +7641,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7238,6 +7658,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, i64 %vl) @@ -7251,6 +7672,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7268,6 +7690,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, i64 %vl) @@ -7282,6 +7705,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7299,6 +7723,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7313,6 +7738,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7330,6 +7756,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7344,6 +7771,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7361,6 +7789,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7375,6 +7804,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7392,6 +7822,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7406,6 +7837,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7424,6 +7856,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7439,6 +7872,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7457,6 +7891,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7472,6 +7907,7 
@@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7490,6 +7926,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7505,6 +7942,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7523,6 +7961,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7538,6 +7977,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7557,6 +7997,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7573,6 +8014,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7592,6 +8034,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7608,6 +8051,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7627,6 +8071,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7643,6 +8088,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7662,6 +8108,7 @@ ; 
CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7678,6 +8125,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7698,6 +8146,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7715,6 +8164,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7735,6 +8185,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7752,6 +8203,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7772,6 +8224,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7789,6 +8242,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7809,6 +8263,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7826,6 +8281,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7847,6 +8303,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, 
%val, %val, i16* %base, %index, i64 %vl) @@ -7865,6 +8322,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7886,6 +8344,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7904,6 +8363,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7925,6 +8385,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7943,6 +8404,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -7964,6 +8426,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -7982,6 +8445,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8004,6 +8468,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8023,6 +8488,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8045,6 +8511,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8064,6 +8531,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), 
v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8086,6 +8554,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8105,6 +8574,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8127,6 +8597,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -8146,6 +8617,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -8163,6 +8635,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64( %val, %val, i8* %base, %index, i64 %vl) @@ -8177,6 +8650,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8194,6 +8668,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, i64 %vl) @@ -8208,6 +8683,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8225,6 +8701,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, i64 %vl) @@ -8239,6 +8716,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8256,6 +8734,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 
+; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, i64 %vl) @@ -8270,6 +8749,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8287,6 +8767,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8301,6 +8782,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8318,6 +8800,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8332,6 +8815,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8349,6 +8833,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8363,6 +8848,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8380,6 +8866,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8394,6 +8881,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8412,6 +8900,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8427,6 +8916,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, 
%index, %mask, i64 %vl) @@ -8445,6 +8935,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8460,6 +8951,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8478,6 +8970,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8493,6 +8986,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8511,6 +9005,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8526,6 +9021,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8545,6 +9041,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8561,6 +9058,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8580,6 +9078,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8596,6 +9095,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8615,6 +9115,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8631,6 +9132,7 @@ ; CHECK-NEXT: 
vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8650,6 +9152,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8666,6 +9169,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8686,6 +9190,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8703,6 +9208,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8723,6 +9229,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8740,6 +9247,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8760,6 +9268,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8777,6 +9286,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8797,6 +9307,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8814,6 +9325,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) 
@@ -8835,6 +9347,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8853,6 +9366,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8874,6 +9388,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8892,6 +9407,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8913,6 +9429,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8931,6 +9448,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8952,6 +9470,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -8970,6 +9489,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -8992,6 +9512,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9011,6 +9532,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9033,6 +9555,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: 
tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9052,6 +9575,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9074,6 +9598,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9093,6 +9618,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9115,6 +9641,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9134,6 +9661,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9151,6 +9679,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, i64 %vl) @@ -9165,6 +9694,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9182,6 +9712,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, i64 %vl) @@ -9196,6 +9727,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9213,6 +9745,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, i64 %vl) @@ -9227,6 +9760,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: 
tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9243,6 +9777,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, i64 %vl) @@ -9256,6 +9791,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9273,6 +9809,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9287,6 +9824,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9304,6 +9842,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9318,6 +9857,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9335,6 +9875,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9349,6 +9890,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9366,6 +9908,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9380,6 +9923,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9398,6 +9942,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9413,6 +9958,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9431,6 +9977,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9446,6 +9993,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9464,6 +10012,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9479,6 +10028,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9497,6 +10047,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9512,6 +10063,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9531,6 +10083,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9547,6 +10100,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9566,6 +10120,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9582,6 +10137,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9601,6 +10157,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, 
mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9617,6 +10174,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9636,6 +10194,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9652,6 +10211,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9672,6 +10232,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9689,6 +10250,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9709,6 +10271,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9726,6 +10289,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9746,6 +10310,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9763,6 +10328,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9783,6 +10349,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9800,6 +10367,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: 
vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9821,6 +10389,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9839,6 +10408,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9860,6 +10430,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9878,6 +10449,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9899,6 +10471,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9917,6 +10490,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9938,6 +10512,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9956,6 +10531,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -9978,6 +10554,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -9997,6 +10574,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32( 
%val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10019,6 +10597,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -10038,6 +10617,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10060,6 +10640,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -10079,6 +10660,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10101,6 +10683,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) @@ -10120,6 +10703,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10137,6 +10721,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, i64 %vl) @@ -10151,6 +10736,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -10168,6 +10754,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, i64 %vl) @@ -10182,6 +10769,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -10198,6 +10786,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, i64 %vl) @@ -10211,6 +10800,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -10228,6 +10818,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, i64 %vl) @@ -10242,6 +10833,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl) @@ -10258,6 +10850,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, i64 %vl) @@ -10271,6 +10864,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10288,6 +10882,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, i64 %vl) @@ -10302,6 +10897,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl) @@ -10319,6 +10915,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, i64 %vl) @@ -10333,6 +10930,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10350,6 +10948,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, i64 %vl) @@ -10364,6 +10963,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10381,6 +10981,7 @@ ; 
CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, i64 %vl) @@ -10395,6 +10996,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10411,6 +11013,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64( %val, %val, i16* %base, %index, i64 %vl) @@ -10424,6 +11027,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64( %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10441,6 +11045,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10455,6 +11060,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10472,6 +11078,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10486,6 +11093,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10503,6 +11111,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10517,6 +11126,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10534,6 +11144,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10548,6 +11159,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v10, 
v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10566,6 +11178,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10581,6 +11194,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10599,6 +11213,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10614,6 +11229,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10632,6 +11248,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10647,6 +11264,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10665,6 +11283,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10680,6 +11299,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10699,6 +11319,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10715,6 +11336,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10734,6 +11356,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10750,6 +11373,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10769,6 +11393,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10785,6 +11410,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10804,6 +11430,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10820,6 +11447,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10840,6 +11468,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10857,6 +11486,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10877,6 +11507,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10894,6 +11525,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10914,6 +11546,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10931,6 +11564,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, 
e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10951,6 +11585,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -10968,6 +11603,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -10989,6 +11625,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11007,6 +11644,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11028,6 +11666,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11046,6 +11685,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11067,6 +11707,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11085,6 +11726,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11106,6 +11748,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11124,6 +11767,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11146,6 +11790,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11165,6 +11810,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11187,6 +11833,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11206,6 +11853,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11228,6 +11876,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11247,6 +11896,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11269,6 +11919,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) @@ -11288,6 +11939,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) @@ -11305,6 +11957,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, i64 %vl) @@ -11319,6 +11972,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11336,6 +11990,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; 
CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8( %val, %val, i64* %base, %index, i64 %vl) @@ -11350,6 +12005,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11367,6 +12023,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16( %val, %val, i64* %base, %index, i64 %vl) @@ -11381,6 +12038,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11398,6 +12056,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64( %val, %val, i64* %base, %index, i64 %vl) @@ -11412,6 +12071,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64( %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11429,6 +12089,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11443,6 +12104,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11460,6 +12122,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11474,6 +12137,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11491,6 +12155,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11505,6 +12170,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11522,6 +12188,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11536,6 +12203,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11554,6 +12222,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11569,6 +12238,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11587,6 +12257,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11602,6 +12273,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11620,6 +12292,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11635,6 +12308,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11653,6 +12327,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) @@ -11668,6 +12343,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) @@ -11685,6 +12361,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg2.nxv16f16.nxv16i16( %val, %val, half* %base, %index, i64 %vl) @@ -11699,6 +12376,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -11716,6 +12394,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8( %val, %val, half* %base, %index, i64 %vl) @@ -11730,6 +12409,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -11746,6 +12426,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32( %val, %val, half* %base, %index, i64 %vl) @@ -11759,6 +12440,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -11776,6 +12458,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32( %val, %val, double* %base, %index, i64 %vl) @@ -11790,6 +12473,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11807,6 +12491,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8( %val, %val, double* %base, %index, i64 %vl) @@ -11821,6 +12506,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11838,6 +12524,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64( %val, %val, double* %base, %index, i64 %vl) @@ -11852,6 +12539,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11869,6 +12557,7 @@ ; 
CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16( %val, %val, double* %base, %index, i64 %vl) @@ -11883,6 +12572,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11900,6 +12590,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64( %val, %val, double* %base, %index, i64 %vl) @@ -11914,6 +12605,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11931,6 +12623,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32( %val, %val, double* %base, %index, i64 %vl) @@ -11945,6 +12638,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11962,6 +12656,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16( %val, %val, double* %base, %index, i64 %vl) @@ -11976,6 +12671,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -11993,6 +12689,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8( %val, %val, double* %base, %index, i64 %vl) @@ -12007,6 +12704,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12024,6 +12722,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12038,6 +12737,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12055,6 +12755,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12069,6 +12770,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12086,6 +12788,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12100,6 +12803,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12117,6 +12821,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, i64 %vl) @@ -12131,6 +12836,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12149,6 +12855,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12164,6 +12871,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12182,6 +12890,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12197,6 +12906,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12215,6 +12925,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12230,6 +12941,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12248,6 +12960,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12263,6 +12976,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12282,6 +12996,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12298,6 +13013,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12317,6 +13033,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12333,6 +13050,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12352,6 +13070,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12368,6 +13087,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12387,6 +13107,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12403,6 +13124,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12423,6 +13145,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12440,6 +13163,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12460,6 +13184,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12477,6 +13202,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12497,6 +13223,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12514,6 +13241,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12534,6 +13262,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12551,6 +13280,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12572,6 +13302,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12590,6 +13321,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12611,6 
+13343,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12629,6 +13362,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12650,6 +13384,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12668,6 +13403,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12689,6 +13425,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12707,6 +13444,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12729,6 +13467,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12748,6 +13487,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12770,6 +13510,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12789,6 +13530,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12811,6 +13553,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, 
(a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12830,6 +13573,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12852,6 +13596,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -12871,6 +13616,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -12888,6 +13634,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32( %val, %val, float* %base, %index, i64 %vl) @@ -12902,6 +13649,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -12919,6 +13667,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8( %val, %val, float* %base, %index, i64 %vl) @@ -12933,6 +13682,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -12950,6 +13700,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16( %val, %val, float* %base, %index, i64 %vl) @@ -12964,6 +13715,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -12980,6 +13732,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64( %val, %val, float* %base, %index, i64 %vl) @@ -12993,6 +13746,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), 
v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13010,6 +13764,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, i64 %vl) @@ -13024,6 +13779,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13041,6 +13797,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, i64 %vl) @@ -13055,6 +13812,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13072,6 +13830,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, i64 %vl) @@ -13086,6 +13845,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13103,6 +13863,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, i64 %vl) @@ -13117,6 +13878,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13135,6 +13897,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13150,6 +13913,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13168,6 +13932,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: 
tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13183,6 +13948,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13201,6 +13967,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13216,6 +13983,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13234,6 +14002,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13249,6 +14018,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13268,6 +14038,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13284,6 +14055,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13303,6 +14075,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13319,6 +14092,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13338,6 +14112,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13354,6 +14129,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13373,6 +14149,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13389,6 +14166,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13409,6 +14187,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13426,6 +14205,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13446,6 +14226,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13463,6 +14244,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13483,6 +14265,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13500,6 +14283,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13520,6 +14304,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13537,6 +14322,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13558,6 +14344,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, 
m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13576,6 +14363,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13597,6 +14385,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13615,6 +14404,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13636,6 +14426,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13654,6 +14445,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13675,6 +14467,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13693,6 +14486,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13715,6 +14509,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13734,6 +14529,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13756,6 +14552,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13775,6 +14572,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13797,6 +14595,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13816,6 +14615,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13838,6 +14638,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -13857,6 +14658,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -13874,6 +14676,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64( %val, %val, half* %base, %index, i64 %vl) @@ -13888,6 +14691,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -13905,6 +14709,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32( %val, %val, half* %base, %index, i64 %vl) @@ -13919,6 +14724,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -13936,6 +14742,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16( %val, %val, half* %base, %index, i64 %vl) @@ -13950,6 +14757,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -13967,6 +14775,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8( %val, %val, half* %base, %index, i64 %vl) @@ -13981,6 +14790,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -13998,6 +14808,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14012,6 +14823,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14029,6 +14841,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14043,6 +14856,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14060,6 +14874,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14074,6 +14889,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14091,6 +14907,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, i64 %vl) @@ -14105,6 +14922,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14123,6 +14941,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg4.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14138,6 +14957,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14156,6 +14976,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14171,6 +14992,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14189,6 +15011,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14204,6 +15027,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14222,6 +15046,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14237,6 +15062,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14256,6 +15082,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14272,6 +15099,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14291,6 +15119,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14307,6 +15136,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14326,6 +15156,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14342,6 +15173,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14361,6 +15193,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14377,6 +15210,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14397,6 +15231,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14414,6 +15249,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14434,6 +15270,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14451,6 +15288,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14471,6 +15309,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14488,6 +15327,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14508,6 +15348,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v 
v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14525,6 +15366,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14546,6 +15388,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14564,6 +15407,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14585,6 +15429,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14603,6 +15448,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14624,6 +15470,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14642,6 +15489,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14663,6 +15511,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14681,6 +15530,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14703,6 +15553,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, 
%val, half* %base, %index, i64 %vl) @@ -14722,6 +15573,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14744,6 +15596,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14763,6 +15616,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14785,6 +15639,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14804,6 +15659,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14826,6 +15682,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -14845,6 +15702,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -14862,6 +15720,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64( %val, %val, float* %base, %index, i64 %vl) @@ -14876,6 +15735,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -14893,6 +15753,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32( %val, %val, float* %base, %index, i64 %vl) @@ -14907,6 +15768,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; 
CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -14924,6 +15786,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16( %val, %val, float* %base, %index, i64 %vl) @@ -14938,6 +15801,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -14955,6 +15819,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8( %val, %val, float* %base, %index, i64 %vl) @@ -14969,6 +15834,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -14986,6 +15852,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, i64 %vl) @@ -15000,6 +15867,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15017,6 +15885,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, i64 %vl) @@ -15031,6 +15900,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15048,6 +15918,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, i64 %vl) @@ -15062,6 +15933,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15079,6 +15951,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg3.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, i64 %vl) @@ -15093,6 +15966,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15111,6 +15985,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15126,6 +16001,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15144,6 +16020,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15159,6 +16036,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15177,6 +16055,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15192,6 +16071,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15210,6 +16090,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15225,6 +16106,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15244,6 +16126,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15260,6 +16143,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15279,6 +16163,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15295,6 +16180,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15314,6 +16200,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15330,6 +16217,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15349,6 +16237,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15365,6 +16254,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15385,6 +16275,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15402,6 +16293,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15422,6 +16314,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15439,6 +16332,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15459,6 +16353,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v 
v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15476,6 +16371,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15496,6 +16392,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15513,6 +16410,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15534,6 +16432,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15552,6 +16451,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15573,6 +16473,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15591,6 +16492,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15612,6 +16514,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15630,6 +16533,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15651,6 +16555,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, 
float* %base, %index, i64 %vl) @@ -15669,6 +16574,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15691,6 +16597,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15710,6 +16617,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15732,6 +16640,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15751,6 +16660,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15773,6 +16683,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15792,6 +16703,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15814,6 +16726,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -15833,6 +16746,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -15850,6 +16764,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16( %val, %val, half* %base, %index, i64 %vl) @@ -15864,6 +16779,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu 
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -15881,6 +16797,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8( %val, %val, half* %base, %index, i64 %vl) @@ -15895,6 +16812,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -15911,6 +16829,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64( %val, %val, half* %base, %index, i64 %vl) @@ -15924,6 +16843,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -15940,6 +16860,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32( %val, %val, half* %base, %index, i64 %vl) @@ -15953,6 +16874,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -15970,6 +16892,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -15984,6 +16907,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16001,6 +16925,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16015,6 +16940,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16032,6 +16958,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail 
call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16046,6 +16973,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16063,6 +16991,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16077,6 +17006,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16095,6 +17025,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16110,6 +17041,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16128,6 +17060,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16143,6 +17076,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16161,6 +17095,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16176,6 +17111,7 @@ ; CHECK-NEXT: vmv2r.v v14, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16194,6 +17130,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16209,6 +17146,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16226,6 +17164,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16( %val, %val, float* %base, %index, i64 %vl) @@ -16240,6 +17179,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -16257,6 +17197,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8( %val, %val, float* %base, %index, i64 %vl) @@ -16271,6 +17212,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -16287,6 +17229,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64( %val, %val, float* %base, %index, i64 %vl) @@ -16300,6 +17243,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -16317,6 +17261,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32( %val, %val, float* %base, %index, i64 %vl) @@ -16331,6 +17276,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -16348,6 +17294,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32( %val, %val, double* %base, %index, i64 %vl) @@ -16362,6 +17309,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16379,6 +17327,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8( %val, %val, double* %base, %index, i64 %vl) @@ -16393,6 +17342,7 @@ ; 
CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16410,6 +17360,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16( %val, %val, double* %base, %index, i64 %vl) @@ -16424,6 +17375,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16441,6 +17393,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64( %val, %val, double* %base, %index, i64 %vl) @@ -16455,6 +17408,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64( %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16472,6 +17426,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16486,6 +17441,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16503,6 +17459,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16517,6 +17474,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16534,6 +17492,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16548,6 +17507,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16565,6 +17525,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; 
CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, i64 %vl) @@ -16579,6 +17540,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16597,6 +17559,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16612,6 +17575,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16630,6 +17594,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16645,6 +17610,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16663,6 +17629,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16678,6 +17645,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16696,6 +17664,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) @@ -16711,6 +17680,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) @@ -16727,6 +17697,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32( %val, %val, half* %base, %index, i64 %vl) @@ -16740,6 +17711,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v 
v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16757,6 +17729,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8( %val, %val, half* %base, %index, i64 %vl) @@ -16771,6 +17744,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16787,6 +17761,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64( %val, %val, half* %base, %index, i64 %vl) @@ -16800,6 +17775,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16817,6 +17793,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16( %val, %val, half* %base, %index, i64 %vl) @@ -16831,6 +17808,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16848,6 +17826,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16862,6 +17841,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16879,6 +17859,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16893,6 +17874,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16910,6 +17892,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vsuxseg3.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16924,6 +17907,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16941,6 +17925,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -16955,6 +17940,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -16973,6 +17959,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -16988,6 +17975,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17006,6 +17994,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17021,6 +18010,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17039,6 +18029,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17054,6 +18045,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17072,6 +18064,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17087,6 +18080,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16( 
%val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17106,6 +18100,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17122,6 +18117,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17141,6 +18137,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17157,6 +18154,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17176,6 +18174,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17192,6 +18191,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17211,6 +18211,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17227,6 +18228,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17247,6 +18249,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17264,6 +18267,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17284,6 +18288,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail 
call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17301,6 +18306,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17321,6 +18327,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17338,6 +18345,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17358,6 +18366,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17375,6 +18384,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17396,6 +18406,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17414,6 +18425,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17435,6 +18447,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17453,6 +18466,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17474,6 +18488,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17492,6 +18507,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, 
a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17513,6 +18529,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17531,6 +18548,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17553,6 +18571,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17572,6 +18591,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17594,6 +18614,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17613,6 +18634,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17635,6 +18657,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17654,6 +18677,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17676,6 +18700,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17695,6 +18720,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17712,6 +18738,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32( %val, %val, half* %base, %index, i64 %vl) @@ -17726,6 +18753,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17743,6 +18771,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8( %val, %val, half* %base, %index, i64 %vl) @@ -17757,6 +18786,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17774,6 +18804,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16( %val, %val, half* %base, %index, i64 %vl) @@ -17788,6 +18819,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17804,6 +18836,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64( %val, %val, half* %base, %index, i64 %vl) @@ -17817,6 +18850,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17834,6 +18868,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, i64 %vl) @@ -17848,6 +18883,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17865,6 +18901,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8( %val, %val, %val, half* 
%base, %index, i64 %vl) @@ -17879,6 +18916,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17896,6 +18934,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, i64 %vl) @@ -17910,6 +18949,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17927,6 +18967,7 @@ ; CHECK-NEXT: vmv1r.v v2, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, i64 %vl) @@ -17941,6 +18982,7 @@ ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17959,6 +19001,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -17974,6 +19017,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -17992,6 +19036,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18007,6 +19052,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18025,6 +19071,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18040,6 +19087,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18058,6 
+19106,7 @@ ; CHECK-NEXT: vmv1r.v v3, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18073,6 +19122,7 @@ ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18092,6 +19142,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18108,6 +19159,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18127,6 +19179,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18143,6 +19196,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18162,6 +19216,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18178,6 +19233,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18197,6 +19253,7 @@ ; CHECK-NEXT: vmv1r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18213,6 +19270,7 @@ ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg5ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18233,6 +19291,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, 
half* %base, %index, i64 %vl) @@ -18250,6 +19309,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18270,6 +19330,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18287,6 +19348,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18307,6 +19369,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18324,6 +19387,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18344,6 +19408,7 @@ ; CHECK-NEXT: vmv1r.v v5, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18361,6 +19426,7 @@ ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg6ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18382,6 +19448,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18400,6 +19467,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18421,6 +19489,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18439,6 +19508,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18460,6 +19530,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18478,6 +19549,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18499,6 +19571,7 @@ ; CHECK-NEXT: vmv1r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18517,6 +19590,7 @@ ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg7ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18539,6 +19613,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18558,6 +19633,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei32.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18580,6 +19656,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18599,6 +19676,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei8.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18621,6 +19699,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v0, (a0), v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18640,6 +19719,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei16.v v1, (a0), v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, 
%val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18662,6 +19742,7 @@ ; CHECK-NEXT: vmv1r.v v7, v0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) @@ -18681,6 +19762,7 @@ ; CHECK-NEXT: vmv1r.v v8, v1 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vsuxseg8ei64.v v1, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) @@ -18698,6 +19780,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32( %val, %val, float* %base, %index, i64 %vl) @@ -18712,6 +19795,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18729,6 +19813,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8( %val, %val, float* %base, %index, i64 %vl) @@ -18743,6 +19828,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18759,6 +19845,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64( %val, %val, float* %base, %index, i64 %vl) @@ -18772,6 +19859,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18789,6 +19877,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16( %val, %val, float* %base, %index, i64 %vl) @@ -18803,6 +19892,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18820,6 +19910,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, 
i64 %vl) @@ -18834,6 +19925,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18851,6 +19943,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, i64 %vl) @@ -18865,6 +19958,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18882,6 +19976,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, i64 %vl) @@ -18896,6 +19991,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18913,6 +20009,7 @@ ; CHECK-NEXT: vmv2r.v v4, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, i64 %vl) @@ -18927,6 +20024,7 @@ ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg3ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18945,6 +20043,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -18960,6 +20059,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei32.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -18978,6 +20078,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -18993,6 +20094,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei8.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -19011,6 +20113,7 @@ ; 
CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v0, (a0), v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -19026,6 +20129,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei64.v v2, (a0), v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) @@ -19044,6 +20148,7 @@ ; CHECK-NEXT: vmv2r.v v6, v0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v0, (a0), v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) @@ -19059,6 +20164,7 @@ ; CHECK-NEXT: vmv2r.v v8, v2 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vsuxseg4ei16.v v2, (a0), v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -27,6 +29,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -38,6 +41,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -49,6 +53,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -61,6 +66,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -71,6 +77,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -83,6 +90,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -93,6 +101,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -105,6 +114,7 @@ ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: 
ret %tvec = trunc %va to ret %tvec @@ -116,6 +126,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -128,6 +139,7 @@ ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -139,6 +151,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -151,6 +164,7 @@ ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -162,6 +176,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -176,6 +191,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -188,6 +204,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -198,6 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -212,6 +230,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -224,6 +243,7 @@ ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -235,6 +255,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -249,6 +270,7 @@ ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -261,6 +283,7 @@ ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v12, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -272,6 +295,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -286,6 +310,7 @@ ; CHECK-NEXT: vnsrl.wi v10, v16, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -298,6 +323,7 @@ ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to 
ret %tvec @@ -309,6 +335,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll @@ -7,6 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -17,6 +18,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -29,6 +31,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -41,6 +44,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -54,6 +58,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -66,6 +71,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -76,6 +82,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -88,6 +95,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -100,6 +108,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -113,6 +122,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -125,6 +135,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -135,6 +146,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -147,6 +159,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; 
CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -159,6 +172,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -172,6 +186,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -184,6 +199,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -194,6 +210,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -206,6 +223,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -218,6 +236,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -231,6 +250,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -243,6 +263,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -253,6 +274,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -265,6 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -277,6 +300,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -290,6 +314,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -302,6 +327,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -312,6 +338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -324,6 +351,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -336,6 +364,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -349,6 +378,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -361,6 +391,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -371,6 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -383,6 +415,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -395,6 +428,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -408,6 +442,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -420,6 +455,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -430,6 +466,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -442,6 +479,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -454,6 +492,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -467,6 +506,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 
16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -479,6 +519,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -489,6 +530,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -501,6 +543,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -513,6 +556,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -526,6 +570,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -538,6 +583,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -548,6 +594,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -560,6 +607,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -572,6 +620,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -585,6 +634,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -597,6 +647,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -607,6 +658,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -619,6 +671,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -631,6 +684,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; 
CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -644,6 +698,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -656,6 +711,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -666,6 +722,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -678,6 +735,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -690,6 +748,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -703,6 +762,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -715,6 +775,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -725,6 +786,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -737,6 +799,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -749,6 +812,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -762,6 +826,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -774,6 +839,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -784,6 +850,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -796,6 +863,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; 
CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -808,6 +876,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -821,6 +890,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -833,6 +903,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -843,6 +914,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -855,6 +927,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -867,6 +940,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -880,6 +954,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -892,6 +967,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -902,6 +978,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -914,6 +991,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -926,6 +1004,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -939,6 +1018,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -951,6 +1031,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -961,6 +1042,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -973,6 +1055,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -985,6 +1068,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -998,6 +1082,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1010,6 +1095,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -1020,6 +1106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1032,6 +1119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1044,6 +1132,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1057,6 +1146,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1069,6 +1159,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -1086,12 +1177,14 @@ ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vxor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1104,6 +1197,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1116,6 +1210,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 8, i32 0 %splat = shufflevector %head, 
undef, zeroinitializer @@ -1129,6 +1224,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1141,6 +1237,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -1158,12 +1255,14 @@ ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vxor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1176,6 +1275,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1188,6 +1288,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1201,6 +1302,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1213,6 +1315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -1230,12 +1333,14 @@ ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vxor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1248,6 +1353,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1260,6 +1366,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1273,6 +1380,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1285,6 +1393,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %vc = xor %va, %vb ret %vc @@ -1302,12 +1411,14 @@ ; RV32-NEXT: vlse64.v v16, (a0), zero ; 
RV32-NEXT: vxor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %head = insertelement undef, i64 %b, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1320,6 +1431,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 -1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1332,6 +1444,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 8 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 8, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1345,6 +1458,7 @@ ; CHECK-NEXT: addi a0, zero, 16 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll @@ -11,6 +11,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i7 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -25,6 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i8( %va, %b, %m, i32 %evl) ret %v @@ -35,6 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -47,6 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -59,6 +63,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -73,6 +78,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -85,6 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -99,6 +106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer 
@@ -111,6 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -127,6 +136,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i8( %va, %b, %m, i32 %evl) ret %v @@ -137,6 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -149,6 +160,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -161,6 +173,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -175,6 +188,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -187,6 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -201,6 +216,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -213,6 +229,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -229,6 +246,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i8( %va, %b, %m, i32 %evl) ret %v @@ -239,6 +257,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -251,6 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -263,6 +283,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -277,6 +298,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: 
vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -289,6 +311,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -303,6 +326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -315,6 +339,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -331,6 +356,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i8( %va, %b, %m, i32 %evl) ret %v @@ -341,6 +367,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -353,6 +380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -365,6 +393,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -379,6 +408,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -391,6 +421,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -405,6 +436,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -417,6 +449,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -433,6 +466,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv15i8( %va, %b, %m, i32 %evl) ret %v @@ -443,6 +477,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, 
v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -455,6 +490,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -467,6 +503,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -481,6 +518,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -493,6 +531,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -507,6 +546,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -519,6 +559,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -535,6 +576,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv16i8( %va, %b, %m, i32 %evl) ret %v @@ -545,6 +587,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -557,6 +600,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -569,6 +613,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -583,6 +628,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -595,6 +641,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -609,6 +656,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: 
.cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -621,6 +669,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -637,6 +686,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv32i8( %va, %b, %m, i32 %evl) ret %v @@ -647,6 +697,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -659,6 +710,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -671,6 +723,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -685,6 +738,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -697,6 +751,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -711,6 +766,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -723,6 +779,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -739,6 +796,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv64i8( %va, %b, %m, i32 %evl) ret %v @@ -749,6 +807,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -761,6 +820,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -773,6 +833,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = 
insertelement undef, i8 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -787,6 +848,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -799,6 +861,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -813,6 +876,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -825,6 +889,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i8 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -841,6 +906,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i16( %va, %b, %m, i32 %evl) ret %v @@ -851,6 +917,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -863,6 +930,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -875,6 +943,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -889,6 +958,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -901,6 +971,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -915,6 +986,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -927,6 +999,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -943,6 +1016,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call 
@llvm.vp.xor.nxv2i16( %va, %b, %m, i32 %evl) ret %v @@ -953,6 +1027,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -965,6 +1040,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -977,6 +1053,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -991,6 +1068,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1003,6 +1081,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1017,6 +1096,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1029,6 +1109,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1045,6 +1126,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i16( %va, %b, %m, i32 %evl) ret %v @@ -1055,6 +1137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1067,6 +1150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1079,6 +1163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1093,6 +1178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1105,6 +1191,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement 
undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1119,6 +1206,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1131,6 +1219,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1147,6 +1236,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i16( %va, %b, %m, i32 %evl) ret %v @@ -1157,6 +1247,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1169,6 +1260,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1181,6 +1273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1195,6 +1288,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1207,6 +1301,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1221,6 +1316,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1233,6 +1329,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1249,6 +1346,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv16i16( %va, %b, %m, i32 %evl) ret %v @@ -1259,6 +1357,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1271,6 +1370,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, 
i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1283,6 +1383,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1297,6 +1398,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1309,6 +1411,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1323,6 +1426,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1335,6 +1439,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1351,6 +1456,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv32i16( %va, %b, %m, i32 %evl) ret %v @@ -1361,6 +1467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1373,6 +1480,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1385,6 +1493,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1399,6 +1508,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1411,6 +1521,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1425,6 +1536,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1437,6 +1549,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = 
insertelement undef, i16 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1453,6 +1566,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i32( %va, %b, %m, i32 %evl) ret %v @@ -1463,6 +1577,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1475,6 +1590,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1487,6 +1603,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1501,6 +1618,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1513,6 +1631,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1527,6 +1646,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1539,6 +1659,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1555,6 +1676,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i32( %va, %b, %m, i32 %evl) ret %v @@ -1565,6 +1687,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1577,6 +1700,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1589,6 +1713,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1603,6 +1728,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = 
insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1615,6 +1741,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1629,6 +1756,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1641,6 +1769,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1657,6 +1786,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i32( %va, %b, %m, i32 %evl) ret %v @@ -1667,6 +1797,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1679,6 +1810,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1691,6 +1823,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1705,6 +1838,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1717,6 +1851,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1731,6 +1866,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1743,6 +1879,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1759,6 +1896,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i32( %va, %b, %m, i32 %evl) ret %v @@ -1769,6 +1907,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, 
i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1781,6 +1920,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1793,6 +1933,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1807,6 +1948,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1819,6 +1961,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1833,6 +1976,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1845,6 +1989,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1861,6 +2006,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv16i32( %va, %b, %m, i32 %evl) ret %v @@ -1871,6 +2017,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1883,6 +2030,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1895,6 +2043,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1909,6 +2058,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1921,6 +2071,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1935,6 +2086,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head 
= insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1947,6 +2099,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i32 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -1963,6 +2116,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i64( %va, %b, %m, i32 %evl) ret %v @@ -1973,6 +2127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -1993,12 +2148,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v9, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2019,12 +2176,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv1i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2039,6 +2198,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2051,6 +2211,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2065,6 +2226,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2077,6 +2239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2093,6 +2256,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i64( %va, %b, %m, i32 %evl) ret %v @@ -2103,6 +2267,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -2123,12 +2288,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: 
vxor.vv v8, v8, v10, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2149,12 +2316,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2169,6 +2338,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2181,6 +2351,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2195,6 +2366,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2207,6 +2379,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2223,6 +2396,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i64( %va, %b, %m, i32 %evl) ret %v @@ -2233,6 +2407,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v12 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -2253,12 +2428,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v12, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2279,12 +2456,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2299,6 +2478,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; 
CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2311,6 +2491,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2325,6 +2506,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2337,6 +2519,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2353,6 +2536,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i64( %va, %b, %m, i32 %evl) ret %v @@ -2363,6 +2547,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vv v8, v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %head = insertelement undef, i1 true, i32 0 %m = shufflevector %head, undef, zeroinitializer @@ -2383,12 +2568,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v16, v0.t ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0, v0.t +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2409,12 +2596,14 @@ ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vxor.vx v8, v8, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %elt.head = insertelement undef, i64 %b, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2429,6 +2618,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2441,6 +2631,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 7, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2455,6 +2646,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 %vb = shufflevector %elt.head, undef, zeroinitializer @@ -2467,6 +2659,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret %elt.head = insertelement undef, i64 -1, i32 0 
%vb = shufflevector %elt.head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir @@ -39,6 +39,7 @@ ; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 3 ; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10 ; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 16 + ; CHECK-NEXT: CFI_INSTRUCTION def_cfa_offset 0 ; CHECK-NEXT: PseudoRET %0:gpr = COPY $x10 %1:gprnox0 = COPY $x11 diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll @@ -16,6 +16,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 0) @@ -36,6 +37,7 @@ ; CHECK-NEXT: vmv4r.v v8, v4 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 0) @@ -55,6 +57,7 @@ ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 0) @@ -75,6 +78,7 @@ ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t ; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 0) @@ -95,6 +99,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a1) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 0) @@ -113,6 +118,7 @@ ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a1) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: %0 = tail call {,, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 0, i64 1) @@ -132,6 +138,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.nxv16i16( %val, %val, i16* %base, i64 0) @@ -145,6 +152,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.nxv16i16( %val, %val, i16* %base, %mask, i64 0) @@ -161,6 +169,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vssseg2.nxv16i16( %val, %val, i16* %base, i64 %offset, i64 0) @@ -174,6 +183,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void 
@llvm.riscv.vssseg2.mask.nxv16i16( %val, %val, i16* %base, i64 %offset, %mask, i64 0) @@ -191,6 +201,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, i64 0) @@ -205,6 +216,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 0) @@ -222,6 +234,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, i64 0) @@ -236,6 +249,7 @@ ; CHECK-NEXT: vmv4r.v v12, v8 ; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 0) diff --git a/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll b/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll --- a/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll +++ b/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll @@ -9,6 +9,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vs1r.v v8, (a1) ; CHECK-NEXT: vs1r.v v9, (a2) +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret entry: br label %return diff --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll --- a/llvm/test/CodeGen/RISCV/select-binop-identity.ll +++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll @@ -18,6 +18,7 @@ ; RV32I-NEXT: and a2, a2, a1 ; RV32I-NEXT: .LBB0_2: ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: and_select_all_ones_i32: @@ -27,6 +28,7 @@ ; RV64I-NEXT: and a2, a2, a1 ; RV64I-NEXT: .LBB0_2: ; RV64I-NEXT: sext.w a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i32 %x, i32 -1 %b = and i32 %a, %y @@ -43,6 +45,7 @@ ; RV32I-NEXT: .LBB1_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: and_select_all_ones_i64: @@ -52,6 +55,7 @@ ; RV64I-NEXT: and a2, a2, a1 ; RV64I-NEXT: .LBB1_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i64 -1, i64 %x %b = and i64 %y, %a @@ -66,6 +70,7 @@ ; RV32I-NEXT: or a2, a2, a1 ; RV32I-NEXT: .LBB2_2: ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: or_select_all_zeros_i32: @@ -75,6 +80,7 @@ ; RV64I-NEXT: or a2, a2, a1 ; RV64I-NEXT: .LBB2_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i32 %x, i32 0 %b = or i32 %y, %a @@ -91,6 +97,7 @@ ; RV32I-NEXT: .LBB3_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: or_select_all_zeros_i64: @@ -100,6 +107,7 @@ ; RV64I-NEXT: or a2, a2, a1 ; RV64I-NEXT: .LBB3_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i64 0, i64 %x %b = or i64 %a, %y @@ -114,6 +122,7 @@ ; RV32I-NEXT: xor a2, a2, 
a1 ; RV32I-NEXT: .LBB4_2: ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: xor_select_all_zeros_i32: @@ -123,6 +132,7 @@ ; RV64I-NEXT: xor a2, a2, a1 ; RV64I-NEXT: .LBB4_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i32 0, i32 %x %b = xor i32 %y, %a @@ -139,6 +149,7 @@ ; RV32I-NEXT: .LBB5_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: xor_select_all_zeros_i64: @@ -148,6 +159,7 @@ ; RV64I-NEXT: xor a2, a2, a1 ; RV64I-NEXT: .LBB5_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i64 %x, i64 0 %b = xor i64 %a, %y @@ -162,6 +174,7 @@ ; RV32I-NEXT: add a2, a2, a1 ; RV32I-NEXT: .LBB6_2: ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: add_select_all_zeros_i32: @@ -171,6 +184,7 @@ ; RV64I-NEXT: addw a2, a2, a1 ; RV64I-NEXT: .LBB6_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i32 0, i32 %x %b = add i32 %y, %a @@ -190,6 +204,7 @@ ; RV32I-NEXT: .LBB7_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: add_select_all_zeros_i64: @@ -199,6 +214,7 @@ ; RV64I-NEXT: add a2, a2, a1 ; RV64I-NEXT: .LBB7_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i64 %x, i64 0 %b = add i64 %a, %y @@ -213,6 +229,7 @@ ; RV32I-NEXT: sub a2, a2, a1 ; RV32I-NEXT: .LBB8_2: ; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sub_select_all_zeros_i32: @@ -222,6 +239,7 @@ ; RV64I-NEXT: subw a2, a2, a1 ; RV64I-NEXT: .LBB8_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i32 0, i32 %x %b = sub i32 %y, %a @@ -240,6 +258,7 @@ ; RV32I-NEXT: .LBB9_2: ; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: mv a1, a4 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sub_select_all_zeros_i64: @@ -249,6 +268,7 @@ ; RV64I-NEXT: sub a2, a2, a1 ; RV64I-NEXT: .LBB9_2: ; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = select i1 %c, i64 %x, i64 0 %b = sub i64 %y, %a diff --git a/llvm/test/CodeGen/RISCV/select-constant-xor.ll b/llvm/test/CodeGen/RISCV/select-constant-xor.ll --- a/llvm/test/CodeGen/RISCV/select-constant-xor.ll +++ b/llvm/test/CodeGen/RISCV/select-constant-xor.ll @@ -9,6 +9,7 @@ ; RV32-NEXT: lui a1, 524288 ; RV32-NEXT: addi a1, a1, -1 ; RV32-NEXT: xor a0, a0, a1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: xori64i32: @@ -17,6 +18,7 @@ ; RV64-NEXT: lui a1, 524288 ; RV64-NEXT: addiw a1, a1, -1 ; RV64-NEXT: xor a0, a0, a1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %shr4 = ashr i64 %a, 63 %conv5 = trunc i64 %shr4 to i32 @@ -31,6 +33,7 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: xor a0, a1, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: selecti64i64: @@ -39,6 +42,7 @@ ; RV64-NEXT: lui a1, 524288 ; RV64-NEXT: addiw a1, a1, -1 ; RV64-NEXT: xor a0, a0, a1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %c = icmp sgt i64 %a, -1 %s = select i1 %c, i64 2147483647, i64 -2147483648 @@ -52,6 +56,7 @@ ; RV32-NEXT: slt a0, a0, a1 ; RV32-NEXT: lui a1, 524288 ; RV32-NEXT: sub a0, a1, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: selecti64i32: @@ -60,6 
+65,7 @@ ; RV64-NEXT: lui a1, 524288 ; RV64-NEXT: addiw a1, a1, -1 ; RV64-NEXT: xor a0, a0, a1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %c = icmp sgt i64 %a, -1 %s = select i1 %c, i32 2147483647, i32 -2147483648 @@ -73,6 +79,7 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: xor a0, a1, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: selecti32i64: @@ -81,6 +88,7 @@ ; RV64-NEXT: lui a1, 524288 ; RV64-NEXT: addiw a1, a1, -1 ; RV64-NEXT: xor a0, a0, a1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %c = icmp sgt i32 %a, -1 %s = select i1 %c, i64 2147483647, i64 -2147483648 @@ -94,12 +102,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: srai a0, a0, 31 ; RV32-NEXT: xori a0, a0, 84 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: xori32i8: ; RV64: # %bb.0: ; RV64-NEXT: sraiw a0, a0, 31 ; RV64-NEXT: xori a0, a0, 84 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %shr4 = ashr i32 %a, 31 %conv5 = trunc i32 %shr4 to i8 @@ -112,12 +122,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: srai a0, a0, 31 ; RV32-NEXT: xori a0, a0, 84 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: selecti32i32: ; RV64: # %bb.0: ; RV64-NEXT: sraiw a0, a0, 31 ; RV64-NEXT: xori a0, a0, 84 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %c = icmp sgt i32 %a, -1 %s = select i1 %c, i32 84, i32 -85 @@ -129,12 +141,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: srai a0, a0, 31 ; RV32-NEXT: xori a0, a0, 84 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: selecti32i8: ; RV64: # %bb.0: ; RV64-NEXT: sraiw a0, a0, 31 ; RV64-NEXT: xori a0, a0, 84 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %c = icmp sgt i32 %a, -1 %s = select i1 %c, i8 84, i8 -85 @@ -147,6 +161,7 @@ ; RV32-NEXT: slli a0, a0, 24 ; RV32-NEXT: srai a0, a0, 31 ; RV32-NEXT: xori a0, a0, 84 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: selecti8i32: @@ -154,6 +169,7 @@ ; RV64-NEXT: slli a0, a0, 56 ; RV64-NEXT: srai a0, a0, 63 ; RV64-NEXT: xori a0, a0, 84 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %c = icmp sgt i8 %a, -1 %s = select i1 %c, i32 84, i32 -85 @@ -168,6 +184,7 @@ ; RV32-NEXT: mv a1, a2 ; RV32-NEXT: .LBB8_2: ; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: icmpasreq: @@ -178,6 +195,7 @@ ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a0, a2 ; RV64-NEXT: .LBB8_2: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %sh = ashr i32 %input, 31 %c = icmp eq i32 %sh, -1 @@ -193,6 +211,7 @@ ; RV32-NEXT: mv a1, a2 ; RV32-NEXT: .LBB9_2: ; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: icmpasrne: @@ -203,6 +222,7 @@ ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a0, a2 ; RV64-NEXT: .LBB9_2: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %sh = ashr i32 %input, 31 %c = icmp ne i32 %sh, -1 @@ -220,6 +240,7 @@ ; RV32-NEXT: mv a2, a1 ; RV32-NEXT: .LBB10_2: ; RV32-NEXT: add a0, a3, a2 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: oneusecmp: @@ -232,6 +253,7 @@ ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: .LBB10_2: ; RV64-NEXT: addw a0, a0, a2 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %c = icmp sle i32 %a, -1 %s = select i1 %c, i32 -128, i32 127 diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll --- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll +++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll @@ -443,12 +443,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: addi a0, a0, -1 +; 
RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_of_not_i32: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 1 ; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %xor = xor i1 %x, 1 %sext = sext i1 %xor to i32 @@ -462,12 +464,14 @@ ; RV32I-NEXT: addi a0, a1, -1 ; RV32I-NEXT: sltu a1, a0, a1 ; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_of_not_i64: ; RV64I: # %bb.0: ; RV64I-NEXT: andi a0, a0, 1 ; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %xor = xor i1 %x, 1 %sext = sext i1 %xor to i64 @@ -481,6 +485,7 @@ ; RV32I-NEXT: addi a0, a0, -7 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_of_not_cmp_i32: @@ -489,6 +494,7 @@ ; RV64I-NEXT: addi a0, a0, -7 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %cmp = icmp eq i32 %x, 7 %xor = xor i1 %cmp, 1 @@ -504,6 +510,7 @@ ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: sext_of_not_cmp_i64: @@ -511,6 +518,7 @@ ; RV64I-NEXT: addi a0, a0, -7 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %cmp = icmp eq i64 %x, 7 %xor = xor i1 %cmp, 1 @@ -525,6 +533,7 @@ ; RV32I-NEXT: addi a0, a0, -7 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: dec_of_zexted_cmp_i32: @@ -533,6 +542,7 @@ ; RV64I-NEXT: addi a0, a0, -7 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %cmp = icmp eq i32 %x, 7 %zext = zext i1 %cmp to i32 @@ -549,6 +559,7 @@ ; RV32I-NEXT: addi a0, a1, -1 ; RV32I-NEXT: sltu a1, a0, a1 ; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: dec_of_zexted_cmp_i64: @@ -556,6 +567,7 @@ ; RV64I-NEXT: addi a0, a0, -7 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %cmp = icmp eq i64 %x, 7 %zext = zext i1 %cmp to i64 diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll --- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll +++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll @@ -7,10 +7,12 @@ define void @f1() shadowcallstack { ; RV32-LABEL: f1: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: f1: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ret void } @@ -20,10 +22,12 @@ define void @f2() shadowcallstack { ; RV32-LABEL: f2: ; RV32: # %bb.0: +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: tail foo@plt ; ; RV64-LABEL: f2: ; RV64: # %bb.0: +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: tail foo@plt tail call void @foo() ret void @@ -42,9 +46,11 @@ ; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: call bar@plt ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: lw ra, -4(s2) ; RV32-NEXT: addi s2, s2, -4 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: f3: @@ -57,9 +63,11 @@ ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call bar@plt ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ld ra, -8(s2) ; RV64-NEXT: addi s2, s2, -8 +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %res = call i32 @bar() %res1 = add i32 %res, 1 @@ -95,9 +103,14 @@ ; RV32-NEXT: lw s1, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore ra +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: .cfi_restore s1 +; RV32-NEXT: .cfi_restore s3 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: lw ra, -4(s2) ; RV32-NEXT: addi s2, s2, -4 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: f4: @@ -128,9 +141,14 @@ ; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64-NEXT: .cfi_restore ra +; RV64-NEXT: .cfi_restore s0 +; RV64-NEXT: .cfi_restore s1 +; RV64-NEXT: .cfi_restore s3 ; RV64-NEXT: addi sp, sp, 32 ; RV64-NEXT: ld ra, -8(s2) ; RV64-NEXT: addi s2, s2, -8 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %res1 = call i32 @bar() %res2 = call i32 @bar() diff --git a/llvm/test/CodeGen/RISCV/shift-and.ll b/llvm/test/CodeGen/RISCV/shift-and.ll --- a/llvm/test/CodeGen/RISCV/shift-and.ll +++ b/llvm/test/CodeGen/RISCV/shift-and.ll @@ -13,12 +13,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 5 ; RV32I-NEXT: andi a0, a0, -8 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test1: ; RV64I: # %bb.0: ; RV64I-NEXT: srliw a0, a0, 8 ; RV64I-NEXT: slli a0, a0, 3 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = lshr i32 %x, 5 %b = and i32 %a, 134217720 @@ -33,12 +35,14 @@ ; RV32I-NEXT: or a0, a0, a2 ; RV32I-NEXT: srli a1, a1, 5 ; RV32I-NEXT: andi a0, a0, -8 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test2: ; RV64I: # %bb.0: ; RV64I-NEXT: srli a0, a0, 5 ; RV64I-NEXT: andi a0, a0, -8 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = lshr i64 %x, 5 %b = and i64 %a, 576460752303423480 @@ -50,12 +54,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 20 ; RV32I-NEXT: slli a0, a0, 14 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test3: ; RV64I: # %bb.0: ; RV64I-NEXT: srliw a0, a0, 20 ; RV64I-NEXT: slli a0, a0, 14 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = lshr i32 %x, 6 %b = and i32 %a, 67092480 @@ -71,12 +77,14 @@ ; RV32I-NEXT: srli a1, a1, 6 ; RV32I-NEXT: lui a2, 1048572 ; RV32I-NEXT: and a0, a0, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test4: ; RV64I: # %bb.0: ; RV64I-NEXT: srli a0, a0, 20 ; RV64I-NEXT: slli a0, a0, 14 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = lshr i64 %x, 6 %b = and i64 %a, 288230376151695360 @@ -88,6 +96,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 10 ; RV32I-NEXT: slli a0, a0, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test5: @@ -95,6 +104,7 @@ ; RV64I-NEXT: slliw a0, a0, 6 ; RV64I-NEXT: lui a1, 1048560 ; RV64I-NEXT: and a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = shl i32 %x, 6 %b = and i32 %a, -65536 @@ -109,12 +119,14 @@ ; RV32I-NEXT: or a1, a1, a2 ; RV32I-NEXT: srli a0, a0, 10 ; RV32I-NEXT: slli a0, a0, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test6: ; RV64I: # %bb.0: ; RV64I-NEXT: srli a0, a0, 10 ; RV64I-NEXT: slli a0, a0, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %a = shl i64 %x, 6 %b = and i64 %a, -65536 diff --git a/llvm/test/CodeGen/RISCV/shlimm-addimm.ll b/llvm/test/CodeGen/RISCV/shlimm-addimm.ll --- a/llvm/test/CodeGen/RISCV/shlimm-addimm.ll +++ 
b/llvm/test/CodeGen/RISCV/shlimm-addimm.ll @@ -14,12 +14,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 5 ; RV32I-NEXT: addi a0, a0, 1184 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add1184_a: ; RV64I: # %bb.0: ; RV64I-NEXT: slliw a0, a0, 5 ; RV64I-NEXT: addiw a0, a0, 1184 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, 1184 @@ -31,12 +33,14 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 5 ; RV32I-NEXT: addi a0, a0, 1184 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add1184_b: ; RV64I: # %bb.0: ; RV64I-NEXT: slliw a0, a0, 5 ; RV64I-NEXT: addiw a0, a0, 1184 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, 1184 @@ -53,12 +57,14 @@ ; RV32I-NEXT: addi a0, a2, 1184 ; RV32I-NEXT: sltu a2, a0, a2 ; RV32I-NEXT: add a1, a1, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add1184_c: ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: addi a0, a0, 1184 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i64 %x, 5 %tmp1 = add i64 %tmp0, 1184 @@ -72,6 +78,7 @@ ; RV32I-NEXT: lui a1, 25 ; RV32I-NEXT: addi a1, a1, -1376 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add101024_a: @@ -80,6 +87,7 @@ ; RV64I-NEXT: lui a1, 25 ; RV64I-NEXT: addiw a1, a1, -1376 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, 101024 @@ -93,6 +101,7 @@ ; RV32I-NEXT: lui a1, 25 ; RV32I-NEXT: addi a1, a1, -1376 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add101024_b: @@ -101,6 +110,7 @@ ; RV64I-NEXT: lui a1, 25 ; RV64I-NEXT: addiw a1, a1, -1376 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, 101024 @@ -119,6 +129,7 @@ ; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: sltu a2, a0, a2 ; RV32I-NEXT: add a1, a1, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add101024_c: @@ -127,6 +138,7 @@ ; RV64I-NEXT: lui a1, 25 ; RV64I-NEXT: addiw a1, a1, -1376 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i64 %x, 5 %tmp1 = add i64 %tmp0, 101024 @@ -140,6 +152,7 @@ ; RV32I-NEXT: lui a1, 12 ; RV32I-NEXT: addi a1, a1, -1184 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add47968_a: @@ -148,6 +161,7 @@ ; RV64I-NEXT: lui a1, 12 ; RV64I-NEXT: addiw a1, a1, -1184 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, 47968 @@ -161,6 +175,7 @@ ; RV32I-NEXT: lui a1, 12 ; RV32I-NEXT: addi a1, a1, -1184 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add47968_b: @@ -169,6 +184,7 @@ ; RV64I-NEXT: lui a1, 12 ; RV64I-NEXT: addiw a1, a1, -1184 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, 47968 @@ -187,6 +203,7 @@ ; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: sltu a2, a0, a2 ; RV32I-NEXT: add a1, a1, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add47968_c: @@ -195,6 +212,7 @@ ; RV64I-NEXT: lui a1, 12 ; RV64I-NEXT: addiw a1, a1, -1184 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: 
.cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i64 %x, 5 %tmp1 = add i64 %tmp0, 47968 @@ -208,6 +226,7 @@ ; RV32I-NEXT: lui a1, 12 ; RV32I-NEXT: addi a1, a1, -1183 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add47969_a: @@ -216,6 +235,7 @@ ; RV64I-NEXT: lui a1, 12 ; RV64I-NEXT: addiw a1, a1, -1183 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, 47969 @@ -229,6 +249,7 @@ ; RV32I-NEXT: lui a1, 12 ; RV32I-NEXT: addi a1, a1, -1183 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add47969_b: @@ -237,6 +258,7 @@ ; RV64I-NEXT: lui a1, 12 ; RV64I-NEXT: addiw a1, a1, -1183 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, 47969 @@ -255,6 +277,7 @@ ; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: sltu a2, a0, a2 ; RV32I-NEXT: add a1, a1, a2 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_add47969_c: @@ -263,6 +286,7 @@ ; RV64I-NEXT: lui a1, 12 ; RV64I-NEXT: addiw a1, a1, -1183 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i64 %x, 5 %tmp1 = add i64 %tmp0, 47969 @@ -276,6 +300,7 @@ ; RV32I-NEXT: lui a1, 1048564 ; RV32I-NEXT: addi a1, a1, 1184 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_sub47968_a: @@ -284,6 +309,7 @@ ; RV64I-NEXT: lui a1, 1048564 ; RV64I-NEXT: addiw a1, a1, 1184 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, -47968 @@ -297,6 +323,7 @@ ; RV32I-NEXT: lui a1, 1048564 ; RV32I-NEXT: addi a1, a1, 1184 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_sub47968_b: @@ -305,6 +332,7 @@ ; RV64I-NEXT: lui a1, 1048564 ; RV64I-NEXT: addiw a1, a1, 1184 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, -47968 @@ -324,6 +352,7 @@ ; RV32I-NEXT: sltu a2, a0, a2 ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_sub47968_c: @@ -332,6 +361,7 @@ ; RV64I-NEXT: lui a1, 1048564 ; RV64I-NEXT: addiw a1, a1, 1184 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i64 %x, 5 %tmp1 = add i64 %tmp0, -47968 @@ -345,6 +375,7 @@ ; RV32I-NEXT: lui a1, 1048564 ; RV32I-NEXT: addi a1, a1, 1183 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_sub47969_a: @@ -353,6 +384,7 @@ ; RV64I-NEXT: lui a1, 1048564 ; RV64I-NEXT: addiw a1, a1, 1183 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, -47969 @@ -366,6 +398,7 @@ ; RV32I-NEXT: lui a1, 1048564 ; RV32I-NEXT: addi a1, a1, 1183 ; RV32I-NEXT: add a0, a0, a1 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_sub47969_b: @@ -374,6 +407,7 @@ ; RV64I-NEXT: lui a1, 1048564 ; RV64I-NEXT: addiw a1, a1, 1183 ; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 %tmp1 = add i32 %tmp0, -47969 @@ -393,6 +427,7 @@ ; RV32I-NEXT: sltu a2, a0, a2 ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: addi a1, a1, -1 +; RV32I-NEXT: 
.cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl5_sub47969_c: @@ -401,6 +436,7 @@ ; RV64I-NEXT: lui a1, 1048564 ; RV64I-NEXT: addiw a1, a1, 1183 ; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %tmp0 = shl i64 %x, 5 %tmp1 = add i64 %tmp0, -47969 diff --git a/llvm/test/CodeGen/RISCV/split-offsets.ll b/llvm/test/CodeGen/RISCV/split-offsets.ll --- a/llvm/test/CodeGen/RISCV/split-offsets.ll +++ b/llvm/test/CodeGen/RISCV/split-offsets.ll @@ -22,6 +22,7 @@ ; RV32I-NEXT: sw a3, 4(a0) ; RV32I-NEXT: sw a3, 0(a1) ; RV32I-NEXT: sw a2, 4(a1) +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test1: @@ -37,6 +38,7 @@ ; RV64I-NEXT: sw a3, 4(a0) ; RV64I-NEXT: sw a3, 0(a1) ; RV64I-NEXT: sw a2, 4(a1) +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret entry: %s = load [65536 x i32]*, [65536 x i32]** %sp @@ -72,6 +74,7 @@ ; RV32I-NEXT: mv a3, a4 ; RV32I-NEXT: blt a3, a2, .LBB1_1 ; RV32I-NEXT: .LBB1_2: # %while_end +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test2: @@ -96,6 +99,7 @@ ; RV64I-NEXT: sext.w a4, a3 ; RV64I-NEXT: blt a4, a2, .LBB1_1 ; RV64I-NEXT: .LBB1_2: # %while_end +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret entry: %s = load [65536 x i32]*, [65536 x i32]** %sp diff --git a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll --- a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll +++ b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll @@ -30,8 +30,13 @@ ; RV32I-NEXT: addi sp, s0, -64 ; RV32I-NEXT: lw s1, 52(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 64 ; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 +; RV32I-NEXT: .cfi_restore s1 ; RV32I-NEXT: addi sp, sp, 64 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller: @@ -59,8 +64,13 @@ ; RV64I-NEXT: addi sp, s0, -64 ; RV64I-NEXT: ld s1, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 64 ; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 +; RV64I-NEXT: .cfi_restore s1 ; RV64I-NEXT: addi sp, sp, 64 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, i32 %n %2 = alloca i32, align 64 diff --git a/llvm/test/CodeGen/RISCV/stack-realignment.ll b/llvm/test/CodeGen/RISCV/stack-realignment.ll --- a/llvm/test/CodeGen/RISCV/stack-realignment.ll +++ b/llvm/test/CodeGen/RISCV/stack-realignment.ll @@ -22,8 +22,12 @@ ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: addi sp, s0, -32 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 32 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 32 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller32: @@ -41,8 +45,12 @@ ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: addi sp, s0, -32 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 32 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 32 call void @callee(i8* %1) @@ -59,7 +67,9 @@ ; RV32I-NEXT: 
mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign32: @@ -71,7 +81,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 32 call void @callee(i8* %1) @@ -94,8 +106,12 @@ ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: addi sp, s0, -64 ; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 64 ; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 64 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller64: @@ -113,8 +129,12 @@ ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: addi sp, s0, -64 ; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 64 ; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 64 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 64 call void @callee(i8* %1) @@ -131,7 +151,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign64: @@ -143,7 +165,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 64 call void @callee(i8* %1) @@ -166,8 +190,12 @@ ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: addi sp, s0, -128 ; RV32I-NEXT: lw s0, 120(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 128 ; RV32I-NEXT: lw ra, 124(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 128 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller128: @@ -185,8 +213,12 @@ ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: addi sp, s0, -128 ; RV64I-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 128 ; RV64I-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 128 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 128 call void @callee(i8* %1) @@ -203,7 +235,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign128: @@ -215,7 +249,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 128 call void @callee(i8* %1) @@ -238,8 +274,12 @@ ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: addi sp, s0, -256 ; RV32I-NEXT: lw s0, 248(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 256 ; RV32I-NEXT: lw ra, 252(sp) # 4-byte Folded Reload +; RV32I-NEXT: 
.cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 256 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller256: @@ -257,8 +297,12 @@ ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: addi sp, s0, -256 ; RV64I-NEXT: ld s0, 240(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 256 ; RV64I-NEXT: ld ra, 248(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 256 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 256 call void @callee(i8* %1) @@ -275,7 +319,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign256: @@ -287,7 +333,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 256 call void @callee(i8* %1) @@ -310,8 +358,12 @@ ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: addi sp, s0, -1024 ; RV32I-NEXT: lw s0, 1016(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 1024 ; RV32I-NEXT: lw ra, 1020(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 1024 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller512: @@ -329,8 +381,12 @@ ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: addi sp, s0, -1024 ; RV64I-NEXT: ld s0, 1008(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 1024 ; RV64I-NEXT: ld ra, 1016(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 1024 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 512 call void @callee(i8* %1) @@ -347,7 +403,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign512: @@ -359,7 +417,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 512 call void @callee(i8* %1) @@ -384,8 +444,12 @@ ; RV32I-NEXT: addi sp, s0, -2048 ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 2032 ; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 2032 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller1024: @@ -405,8 +469,12 @@ ; RV64I-NEXT: addi sp, s0, -2048 ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 2032 ; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 2032 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 1024 call void @callee(i8* %1) @@ -423,7 +491,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; 
RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign1024: @@ -435,7 +505,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 1024 call void @callee(i8* %1) @@ -467,8 +539,12 @@ ; RV32I-NEXT: addi a0, a0, -2032 ; RV32I-NEXT: add sp, sp, a0 ; RV32I-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 2032 ; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 2032 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller2048: @@ -495,8 +571,12 @@ ; RV64I-NEXT: addiw a0, a0, -2032 ; RV64I-NEXT: add sp, sp, a0 ; RV64I-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 2032 ; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 2032 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 2048 call void @callee(i8* %1) @@ -513,7 +593,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign2048: @@ -525,7 +607,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 2048 call void @callee(i8* %1) @@ -557,8 +641,12 @@ ; RV32I-NEXT: addi a0, a0, -2032 ; RV32I-NEXT: add sp, sp, a0 ; RV32I-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_def_cfa sp, 2032 ; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra +; RV32I-NEXT: .cfi_restore s0 ; RV32I-NEXT: addi sp, sp, 2032 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller4096: @@ -585,8 +673,12 @@ ; RV64I-NEXT: addiw a0, a0, -2032 ; RV64I-NEXT: add sp, sp, a0 ; RV64I-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_def_cfa sp, 2032 ; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra +; RV64I-NEXT: .cfi_restore s0 ; RV64I-NEXT: addi sp, sp, 2032 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 4096 call void @callee(i8* %1) @@ -603,7 +695,9 @@ ; RV32I-NEXT: mv a0, sp ; RV32I-NEXT: call callee@plt ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: .cfi_restore ra ; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: caller_no_realign4096: @@ -615,7 +709,9 @@ ; RV64I-NEXT: mv a0, sp ; RV64I-NEXT: call callee@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: .cfi_restore ra ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret %1 = alloca i8, align 4096 call void @callee(i8* %1) diff --git a/llvm/test/CodeGen/RISCV/tail-calls.ll b/llvm/test/CodeGen/RISCV/tail-calls.ll --- a/llvm/test/CodeGen/RISCV/tail-calls.ll +++ b/llvm/test/CodeGen/RISCV/tail-calls.ll @@ -71,6 +71,7 @@ ; CHECK-NEXT: mv a4, a5 ; CHECK-NEXT: mv a5, a6 ; CHECK-NEXT: mv a6, a7 +; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: jr 
t1 %9 = tail call i32 %0(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) ret i32 %9 diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll --- a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -53,6 +53,7 @@ ; ILP32-ILP32F-FPELIM-NEXT: addi a1, sp, 24 ; ILP32-ILP32F-FPELIM-NEXT: sw a1, 12(sp) ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 48 +; ILP32-ILP32F-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; ILP32-ILP32F-FPELIM-NEXT: ret ; ; ILP32-ILP32F-WITHFP-LABEL: va1: @@ -76,8 +77,12 @@ ; ILP32-ILP32F-WITHFP-NEXT: addi a1, s0, 8 ; ILP32-ILP32F-WITHFP-NEXT: sw a1, -12(s0) ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; ILP32-ILP32F-WITHFP-NEXT: .cfi_def_cfa sp, 16 ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; ILP32-ILP32F-WITHFP-NEXT: .cfi_restore ra +; ILP32-ILP32F-WITHFP-NEXT: .cfi_restore s0 ; ILP32-ILP32F-WITHFP-NEXT: addi sp, sp, 48 +; ILP32-ILP32F-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; ILP32-ILP32F-WITHFP-NEXT: ret ; ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va1: @@ -95,6 +100,7 @@ ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a1, sp, 24 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a1, 12(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 48 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-FPELIM-LABEL: va1: @@ -112,6 +118,7 @@ ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: lw a0, 24(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 +; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va1: @@ -135,8 +142,12 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -24(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: lw a0, 8(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa sp, 32 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_restore ra +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_restore s0 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96 +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret %va = alloca i8*, align 4 %1 = bitcast i8** %va to i8* @@ -1777,6 +1788,7 @@ ; ILP32-ILP32F-FPELIM-NEXT: lui a1, 24414 ; ILP32-ILP32F-FPELIM-NEXT: addi a1, a1, 304 ; ILP32-ILP32F-FPELIM-NEXT: add sp, sp, a1 +; ILP32-ILP32F-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; ILP32-ILP32F-FPELIM-NEXT: ret ; ; ILP32-ILP32F-WITHFP-LABEL: va_large_stack: @@ -1809,8 +1821,12 @@ ; ILP32-ILP32F-WITHFP-NEXT: addi a1, a1, -1728 ; ILP32-ILP32F-WITHFP-NEXT: add sp, sp, a1 ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 1992(sp) # 4-byte Folded Reload +; ILP32-ILP32F-WITHFP-NEXT: .cfi_def_cfa sp, 2064 ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 1996(sp) # 4-byte Folded Reload +; ILP32-ILP32F-WITHFP-NEXT: .cfi_restore ra +; ILP32-ILP32F-WITHFP-NEXT: .cfi_restore s0 ; ILP32-ILP32F-WITHFP-NEXT: addi sp, sp, 2032 +; ILP32-ILP32F-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; ILP32-ILP32F-WITHFP-NEXT: ret ; ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-LABEL: va_large_stack: @@ -1855,6 +1871,7 @@ ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a1, 24414 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a1, a1, 304 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add sp, sp, a1 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-FPELIM-LABEL: va_large_stack: @@ -1902,6 +1919,7 @@ ; 
LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 ; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a1, a1, 336 ; LP64-LP64F-LP64D-FPELIM-NEXT: add sp, sp, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 0 ; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va_large_stack: @@ -1934,8 +1952,12 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a1, a1, -1680 ; LP64-LP64F-LP64D-WITHFP-NEXT: add sp, sp, a1 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 1952(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa sp, 2096 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 1960(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_restore ra +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_restore s0 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 2032 +; LP64-LP64F-LP64D-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret %large = alloca [ 100000000 x i8 ] %va = alloca i8*, align 4 diff --git a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll --- a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll +++ b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll @@ -45,6 +45,7 @@ ; RV32-NEXT: .LBB0_8: ; RV32-NEXT: sb a0, 2(a1) ; RV32-NEXT: sh a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vec3_setcc_crash: @@ -83,6 +84,7 @@ ; RV64-NEXT: .LBB0_8: ; RV64-NEXT: sb a0, 2(a1) ; RV64-NEXT: sh a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret %a = load <3 x i8>, <3 x i8>* %in %cmp = icmp sgt <3 x i8> %a, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll --- a/llvm/test/CodeGen/RISCV/xaluo.ll +++ b/llvm/test/CodeGen/RISCV/xaluo.ll @@ -15,6 +15,7 @@ ; RV32-NEXT: slti a1, a1, 0 ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: sw a3, 0(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo1.i32: @@ -26,6 +27,7 @@ ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo1.i32: @@ -35,6 +37,7 @@ ; RV32ZBA-NEXT: slti a1, a1, 0 ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: sw a3, 0(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo1.i32: @@ -46,6 +49,7 @@ ; RV64ZBA-NEXT: xor a0, a0, a3 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a3, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -62,6 +66,7 @@ ; RV32-NEXT: addi a2, a0, 4 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo2.i32: @@ -72,6 +77,7 @@ ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo2.i32: @@ -79,6 +85,7 @@ ; RV32ZBA-NEXT: addi a2, a0, 4 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo2.i32: @@ -89,6 +96,7 @@ ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 4) @@ -106,6 +114,7 @@ ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo3.i32: @@ -116,6 +125,7 @@ ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) +; 
RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo3.i32: @@ -124,6 +134,7 @@ ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo3.i32: @@ -134,6 +145,7 @@ ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 -4) @@ -152,6 +164,7 @@ ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo4.i32: @@ -164,6 +177,7 @@ ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo4.i32: @@ -173,6 +187,7 @@ ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo4.i32: @@ -185,6 +200,7 @@ ; RV64ZBA-NEXT: xor a0, a0, a3 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a3, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 16777215) @@ -208,6 +224,7 @@ ; RV32-NEXT: slti a0, a0, 0 ; RV32-NEXT: sw a2, 0(a4) ; RV32-NEXT: sw a5, 4(a4) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo1.i64: @@ -217,6 +234,7 @@ ; RV64-NEXT: slti a1, a1, 0 ; RV64-NEXT: xor a0, a1, a0 ; RV64-NEXT: sd a3, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo1.i64: @@ -232,6 +250,7 @@ ; RV32ZBA-NEXT: slti a0, a0, 0 ; RV32ZBA-NEXT: sw a2, 0(a4) ; RV32ZBA-NEXT: sw a5, 4(a4) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo1.i64: @@ -241,6 +260,7 @@ ; RV64ZBA-NEXT: slti a1, a1, 0 ; RV64ZBA-NEXT: xor a0, a1, a0 ; RV64ZBA-NEXT: sd a3, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -262,6 +282,7 @@ ; RV32-NEXT: slti a0, a0, 0 ; RV32-NEXT: sw a3, 0(a2) ; RV32-NEXT: sw a4, 4(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo2.i64: @@ -269,6 +290,7 @@ ; RV64-NEXT: addi a2, a0, 4 ; RV64-NEXT: slt a0, a2, a0 ; RV64-NEXT: sd a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo2.i64: @@ -282,6 +304,7 @@ ; RV32ZBA-NEXT: slti a0, a0, 0 ; RV32ZBA-NEXT: sw a3, 0(a2) ; RV32ZBA-NEXT: sw a4, 4(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo2.i64: @@ -289,6 +312,7 @@ ; RV64ZBA-NEXT: addi a2, a0, 4 ; RV64ZBA-NEXT: slt a0, a2, a0 ; RV64ZBA-NEXT: sd a2, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 4) @@ -310,6 +334,7 @@ ; RV32-NEXT: slti a0, a0, 0 ; RV32-NEXT: sw a3, 0(a2) ; RV32-NEXT: sw a4, 4(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo3.i64: @@ -318,6 +343,7 @@ ; RV64-NEXT: slt a0, a2, a0 ; RV64-NEXT: xori a0, a0, 1 ; RV64-NEXT: sd a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo3.i64: @@ -331,6 +357,7 @@ ; RV32ZBA-NEXT: slti a0, a0, 0 ; RV32ZBA-NEXT: sw a3, 0(a2) ; RV32ZBA-NEXT: sw a4, 4(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo3.i64: @@ -339,6 +366,7 @@ ; RV64ZBA-NEXT: 
slt a0, a2, a0 ; RV64ZBA-NEXT: xori a0, a0, 1 ; RV64ZBA-NEXT: sd a2, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -4) @@ -354,6 +382,7 @@ ; RV32-NEXT: add a1, a0, a1 ; RV32-NEXT: sltu a0, a1, a0 ; RV32-NEXT: sw a1, 0(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i32: @@ -362,6 +391,7 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: sw a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i32: @@ -369,6 +399,7 @@ ; RV32ZBA-NEXT: add a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a1, a0 ; RV32ZBA-NEXT: sw a1, 0(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i32: @@ -377,6 +408,7 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sltu a0, a1, a0 ; RV64ZBA-NEXT: sw a1, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -392,6 +424,7 @@ ; RV32-NEXT: addi a2, a0, -2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i32.constant: @@ -400,6 +433,7 @@ ; RV64-NEXT: addiw a3, a0, -2 ; RV64-NEXT: sltu a0, a3, a2 ; RV64-NEXT: sw a3, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i32.constant: @@ -407,6 +441,7 @@ ; RV32ZBA-NEXT: addi a2, a0, -2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i32.constant: @@ -415,6 +450,7 @@ ; RV64ZBA-NEXT: addiw a3, a0, -2 ; RV64ZBA-NEXT: sltu a0, a3, a2 ; RV64ZBA-NEXT: sw a3, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 -2) @@ -437,6 +473,7 @@ ; RV32-NEXT: .LBB9_2: # %entry ; RV32-NEXT: sw a2, 0(a4) ; RV32-NEXT: sw a3, 4(a4) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i64: @@ -444,6 +481,7 @@ ; RV64-NEXT: add a1, a0, a1 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: sd a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i64: @@ -458,6 +496,7 @@ ; RV32ZBA-NEXT: .LBB9_2: # %entry ; RV32ZBA-NEXT: sw a2, 0(a4) ; RV32ZBA-NEXT: sw a3, 4(a4) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i64: @@ -465,6 +504,7 @@ ; RV64ZBA-NEXT: add a1, a0, a1 ; RV64ZBA-NEXT: sltu a0, a1, a0 ; RV64ZBA-NEXT: sd a1, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -482,6 +522,7 @@ ; RV32-NEXT: slt a0, a1, a0 ; RV32-NEXT: xor a0, a3, a0 ; RV32-NEXT: sw a1, 0(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo1.i32: @@ -493,6 +534,7 @@ ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo1.i32: @@ -502,6 +544,7 @@ ; RV32ZBA-NEXT: slt a0, a1, a0 ; RV32ZBA-NEXT: xor a0, a3, a0 ; RV32ZBA-NEXT: sw a1, 0(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo1.i32: @@ -513,6 +556,7 @@ ; RV64ZBA-NEXT: xor a0, a0, a3 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a3, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2) @@ -528,6 +572,7 @@ ; RV32-NEXT: addi a2, a0, 4 ; RV32-NEXT: slt 
a0, a2, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo2.i32: @@ -538,6 +583,7 @@ ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo2.i32: @@ -545,6 +591,7 @@ ; RV32ZBA-NEXT: addi a2, a0, 4 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo2.i32: @@ -555,6 +602,7 @@ ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 -4) @@ -578,6 +626,7 @@ ; RV32-NEXT: sw a0, 0(a4) ; RV32-NEXT: sw a5, 4(a4) ; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.i64: @@ -587,6 +636,7 @@ ; RV64-NEXT: slt a0, a1, a0 ; RV64-NEXT: xor a0, a3, a0 ; RV64-NEXT: sd a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.i64: @@ -602,6 +652,7 @@ ; RV32ZBA-NEXT: sw a0, 0(a4) ; RV32ZBA-NEXT: sw a5, 4(a4) ; RV32ZBA-NEXT: mv a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.i64: @@ -611,6 +662,7 @@ ; RV64ZBA-NEXT: slt a0, a1, a0 ; RV64ZBA-NEXT: xor a0, a3, a0 ; RV64ZBA-NEXT: sd a1, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2) @@ -626,6 +678,7 @@ ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: sw a1, 0(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i32: @@ -634,6 +687,7 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: sw a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i32: @@ -641,6 +695,7 @@ ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: sw a1, 0(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i32: @@ -649,6 +704,7 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sltu a0, a0, a1 ; RV64ZBA-NEXT: sw a1, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2) @@ -664,6 +720,7 @@ ; RV32-NEXT: addi a2, a0, 2 ; RV32-NEXT: sltu a0, a0, a2 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i32.constant.rhs: @@ -672,6 +729,7 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a0, a2 ; RV64-NEXT: sw a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i32.constant.rhs: @@ -679,6 +737,7 @@ ; RV32ZBA-NEXT: addi a2, a0, 2 ; RV32ZBA-NEXT: sltu a0, a0, a2 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i32.constant.rhs: @@ -687,6 +746,7 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sltu a0, a0, a2 ; RV64ZBA-NEXT: sw a2, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 -2) @@ -704,6 +764,7 @@ ; RV32-NEXT: addi a0, a2, 1 ; RV32-NEXT: seqz a0, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i32.constant.lhs: @@ -713,6 +774,7 @@ ; RV64-NEXT: addi a0, a2, 1 ; RV64-NEXT: seqz a0, a0 ; RV64-NEXT: sw a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; 
RV32ZBA-LABEL: usubo.i32.constant.lhs: @@ -722,6 +784,7 @@ ; RV32ZBA-NEXT: addi a0, a2, 1 ; RV32ZBA-NEXT: seqz a0, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i32.constant.lhs: @@ -731,6 +794,7 @@ ; RV64ZBA-NEXT: addi a0, a2, 1 ; RV64ZBA-NEXT: seqz a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 -2, i32 %v1) @@ -756,6 +820,7 @@ ; RV32-NEXT: .LBB16_3: # %entry ; RV32-NEXT: sw a2, 0(a4) ; RV32-NEXT: sw a3, 4(a4) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.i64: @@ -763,6 +828,7 @@ ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: sd a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i64: @@ -780,6 +846,7 @@ ; RV32ZBA-NEXT: .LBB16_3: # %entry ; RV32ZBA-NEXT: sw a2, 0(a4) ; RV32ZBA-NEXT: sw a3, 4(a4) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.i64: @@ -787,6 +854,7 @@ ; RV64ZBA-NEXT: sub a1, a0, a1 ; RV64ZBA-NEXT: sltu a0, a0, a1 ; RV64ZBA-NEXT: sd a1, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2) @@ -805,6 +873,7 @@ ; RV32-NEXT: xor a0, a3, a0 ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: sw a1, 0(a2) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.i32: @@ -816,6 +885,7 @@ ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.i32: @@ -826,6 +896,7 @@ ; RV32ZBA-NEXT: xor a0, a3, a0 ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: sw a1, 0(a2) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.i32: @@ -837,6 +908,7 @@ ; RV64ZBA-NEXT: xor a0, a0, a3 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a3, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2) @@ -856,6 +928,7 @@ ; RV32-NEXT: xor a0, a3, a0 ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: sw a2, 0(a1) +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo2.i32: @@ -867,6 +940,7 @@ ; RV64-NEXT: xor a0, a0, a3 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a3, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo2.i32: @@ -878,6 +952,7 @@ ; RV32ZBA-NEXT: xor a0, a3, a0 ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: sw a2, 0(a1) +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo2.i32: @@ -889,6 +964,7 @@ ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 13) @@ -963,7 +1039,12 @@ ; RV32-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: .cfi_restore s1 +; RV32-NEXT: .cfi_restore s2 +; RV32-NEXT: .cfi_restore s3 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.i64: @@ -974,6 +1055,7 @@ ; RV64-NEXT: xor a0, a3, a0 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sd a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.i64: @@ -1040,7 +1122,12 @@ ; RV32ZBA-NEXT: lw s2, 4(sp) # 4-byte 
Folded Reload ; RV32ZBA-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32ZBA-NEXT: .cfi_restore s0 +; RV32ZBA-NEXT: .cfi_restore s1 +; RV32ZBA-NEXT: .cfi_restore s2 +; RV32ZBA-NEXT: .cfi_restore s3 ; RV32ZBA-NEXT: addi sp, sp, 16 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.i64: @@ -1051,6 +1138,7 @@ ; RV64ZBA-NEXT: xor a0, a3, a0 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sd a1, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2) @@ -1085,6 +1173,7 @@ ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: sw t0, 4(a2) ; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo2.i64: @@ -1096,6 +1185,7 @@ ; RV64-NEXT: xor a0, a3, a0 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sd a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo2.i64: @@ -1122,6 +1212,7 @@ ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: sw t0, 4(a2) ; RV32ZBA-NEXT: mv a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo2.i64: @@ -1133,6 +1224,7 @@ ; RV64ZBA-NEXT: xor a0, a3, a0 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sd a2, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 13) @@ -1150,6 +1242,7 @@ ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: mv a0, a3 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.i32: @@ -1160,6 +1253,7 @@ ; RV64-NEXT: srli a0, a1, 32 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.i32: @@ -1169,6 +1263,7 @@ ; RV32ZBA-NEXT: mul a0, a0, a1 ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: mv a0, a3 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.i32: @@ -1179,6 +1274,7 @@ ; RV64ZBA-NEXT: srli a0, a1, 32 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a1, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2) @@ -1197,6 +1293,7 @@ ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: sw a0, 0(a1) ; RV32-NEXT: mv a0, a2 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo2.i32: @@ -1208,6 +1305,7 @@ ; RV64-NEXT: srli a0, a2, 32 ; RV64-NEXT: snez a0, a0 ; RV64-NEXT: sw a2, 0(a1) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo2.i32: @@ -1218,6 +1316,7 @@ ; RV32ZBA-NEXT: mul a0, a0, a3 ; RV32ZBA-NEXT: sw a0, 0(a1) ; RV32ZBA-NEXT: mv a0, a2 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo2.i32: @@ -1228,6 +1327,7 @@ ; RV64ZBA-NEXT: srli a0, a2, 32 ; RV64ZBA-NEXT: snez a0, a0 ; RV64ZBA-NEXT: sw a2, 0(a1) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 13) @@ -1246,6 +1346,7 @@ ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: mv a0, a3 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo3.i32: @@ -1257,6 +1358,7 @@ ; RV64-NEXT: snez a1, a1 ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sw a1, 0(a2) +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo3.i32: @@ -1266,6 +1368,7 @@ ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: mv a0, a3 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; 
; RV64ZBA-LABEL: umulo3.i32: @@ -1277,6 +1380,7 @@ ; RV64ZBA-NEXT: snez a3, a3 ; RV64ZBA-NEXT: mulw a0, a0, a1 ; RV64ZBA-NEXT: sw a3, 0(a2) +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret %4 = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %0, i32 %1) %5 = extractvalue { i32, i1 } %4, 1 @@ -1309,6 +1413,7 @@ ; RV32-NEXT: sw a0, 0(a4) ; RV32-NEXT: sw a6, 4(a4) ; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.i64: @@ -1318,6 +1423,7 @@ ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sd a0, 0(a2) ; RV64-NEXT: mv a0, a3 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.i64: @@ -1342,6 +1448,7 @@ ; RV32ZBA-NEXT: sw a0, 0(a4) ; RV32ZBA-NEXT: sw a6, 4(a4) ; RV32ZBA-NEXT: mv a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.i64: @@ -1351,6 +1458,7 @@ ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: sd a0, 0(a2) ; RV64ZBA-NEXT: mv a0, a3 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2) @@ -1375,6 +1483,7 @@ ; RV32-NEXT: sw a0, 0(a2) ; RV32-NEXT: sw a4, 4(a2) ; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo2.i64: @@ -1385,6 +1494,7 @@ ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: sd a0, 0(a1) ; RV64-NEXT: mv a0, a2 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo2.i64: @@ -1401,6 +1511,7 @@ ; RV32ZBA-NEXT: sw a0, 0(a2) ; RV32ZBA-NEXT: sw a4, 4(a2) ; RV32ZBA-NEXT: mv a0, a1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo2.i64: @@ -1411,6 +1522,7 @@ ; RV64ZBA-NEXT: mul a0, a0, a3 ; RV64ZBA-NEXT: sd a0, 0(a1) ; RV64ZBA-NEXT: mv a0, a2 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 13) @@ -1434,6 +1546,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB26_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.select.i32: @@ -1446,6 +1559,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB26_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.select.i32: @@ -1457,6 +1571,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB26_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.select.i32: @@ -1469,6 +1584,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB26_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -1485,6 +1601,7 @@ ; RV32-NEXT: slti a1, a1, 0 ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.not.i32: @@ -1495,6 +1612,7 @@ ; RV64-NEXT: addw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.not.i32: @@ -1504,6 +1622,7 @@ ; RV32ZBA-NEXT: slti a1, a1, 0 ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.not.i32: @@ -1514,6 +1633,7 @@ ; RV64ZBA-NEXT: addw a0, a0, a1 ; RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: seqz a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} 
@llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -1538,6 +1658,7 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB28_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.select.i64: @@ -1549,6 +1670,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB28_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.select.i64: @@ -1566,6 +1688,7 @@ ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB28_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.select.i64: @@ -1577,6 +1700,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB28_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -1598,6 +1722,7 @@ ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: addi a1, zero, -1 ; RV32-NEXT: slt a0, a1, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.not.i64: @@ -1607,6 +1732,7 @@ ; RV64-NEXT: slti a1, a1, 0 ; RV64-NEXT: xor a0, a1, a0 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.not.i64: @@ -1621,6 +1747,7 @@ ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: addi a1, zero, -1 ; RV32ZBA-NEXT: slt a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.not.i64: @@ -1630,6 +1757,7 @@ ; RV64ZBA-NEXT: slti a1, a1, 0 ; RV64ZBA-NEXT: xor a0, a1, a0 ; RV64ZBA-NEXT: xori a0, a0, 1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -1646,6 +1774,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB30_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.select.i32: @@ -1656,6 +1785,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB30_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.select.i32: @@ -1665,6 +1795,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB30_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.select.i32: @@ -1675,6 +1806,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB30_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -1689,6 +1821,7 @@ ; RV32-NEXT: add a1, a0, a1 ; RV32-NEXT: sltu a0, a1, a0 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.not.i32: @@ -1697,6 +1830,7 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.not.i32: @@ -1704,6 +1838,7 @@ ; RV32ZBA-NEXT: add a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a1, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.not.i32: @@ -1712,6 +1847,7 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sltu a0, a1, a0 ; RV64ZBA-NEXT: xori a0, a0, 1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -1731,6 +1867,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: beqz a4, .LBB32_4 ; RV32-NEXT: .LBB32_2: # %entry +; 
RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB32_3: # %entry ; RV32-NEXT: sltu a4, a5, a1 @@ -1738,6 +1875,7 @@ ; RV32-NEXT: .LBB32_4: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.select.i64: @@ -1747,6 +1885,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB32_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.select.i64: @@ -1759,6 +1898,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: beqz a4, .LBB32_4 ; RV32ZBA-NEXT: .LBB32_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB32_3: # %entry ; RV32ZBA-NEXT: sltu a4, a5, a1 @@ -1766,6 +1906,7 @@ ; RV32ZBA-NEXT: .LBB32_4: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.select.i64: @@ -1775,6 +1916,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB32_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -1795,6 +1937,7 @@ ; RV32-NEXT: sltu a0, a2, a1 ; RV32-NEXT: .LBB33_2: # %entry ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.not.i64: @@ -1802,6 +1945,7 @@ ; RV64-NEXT: add a1, a0, a1 ; RV64-NEXT: sltu a0, a1, a0 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.not.i64: @@ -1815,6 +1959,7 @@ ; RV32ZBA-NEXT: sltu a0, a2, a1 ; RV32ZBA-NEXT: .LBB33_2: # %entry ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.not.i64: @@ -1822,6 +1967,7 @@ ; RV64ZBA-NEXT: add a1, a0, a1 ; RV64ZBA-NEXT: sltu a0, a1, a0 ; RV64ZBA-NEXT: xori a0, a0, 1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -1840,6 +1986,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB34_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.select.i32: @@ -1852,6 +1999,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB34_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.select.i32: @@ -1863,6 +2011,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB34_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.select.i32: @@ -1875,6 +2024,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB34_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2) @@ -1891,6 +2041,7 @@ ; RV32-NEXT: slt a0, a1, a0 ; RV32-NEXT: xor a0, a2, a0 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.not.i32: @@ -1901,6 +2052,7 @@ ; RV64-NEXT: subw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.not.i32: @@ -1910,6 +2062,7 @@ ; RV32ZBA-NEXT: slt a0, a1, a0 ; RV32ZBA-NEXT: xor a0, a2, a0 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.not.i32: @@ -1920,6 +2073,7 @@ ; RV64ZBA-NEXT: subw a0, a0, a1 ; 
RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: seqz a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2) @@ -1942,6 +2096,7 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB36_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.select.i64: @@ -1953,6 +2108,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB36_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.select.i64: @@ -1968,6 +2124,7 @@ ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB36_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.select.i64: @@ -1979,6 +2136,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB36_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2) @@ -1998,6 +2156,7 @@ ; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: addi a1, zero, -1 ; RV32-NEXT: slt a0, a1, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: ssub.not.i64: @@ -2007,6 +2166,7 @@ ; RV64-NEXT: slt a0, a1, a0 ; RV64-NEXT: xor a0, a2, a0 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssub.not.i64: @@ -2019,6 +2179,7 @@ ; RV32ZBA-NEXT: and a0, a1, a0 ; RV32ZBA-NEXT: addi a1, zero, -1 ; RV32ZBA-NEXT: slt a0, a1, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssub.not.i64: @@ -2028,6 +2189,7 @@ ; RV64ZBA-NEXT: slt a0, a1, a0 ; RV64ZBA-NEXT: xor a0, a2, a0 ; RV64ZBA-NEXT: xori a0, a0, 1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2) @@ -2044,6 +2206,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB38_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.select.i32: @@ -2054,6 +2217,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB38_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.select.i32: @@ -2063,6 +2227,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB38_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.select.i32: @@ -2073,6 +2238,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB38_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2) @@ -2087,6 +2253,7 @@ ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.not.i32: @@ -2095,6 +2262,7 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.not.i32: @@ -2102,6 +2270,7 @@ ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.not.i32: @@ -2110,6 +2279,7 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sltu a0, a0, a1 ; RV64ZBA-NEXT: xori a0, a0, 1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 
%v1, i32 %v2) @@ -2137,6 +2307,7 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB40_4: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.select.i64: @@ -2146,6 +2317,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB40_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.select.i64: @@ -2166,6 +2338,7 @@ ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB40_4: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.select.i64: @@ -2175,6 +2348,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB40_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2) @@ -2193,11 +2367,13 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a1, a3 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB41_2: ; RV32-NEXT: sub a1, a0, a2 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.not.i64: @@ -2205,6 +2381,7 @@ ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: sltu a0, a0, a1 ; RV64-NEXT: xori a0, a0, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.not.i64: @@ -2216,11 +2393,13 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a1, a3 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB41_2: ; RV32ZBA-NEXT: sub a1, a0, a2 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.not.i64: @@ -2228,6 +2407,7 @@ ; RV64ZBA-NEXT: sub a1, a0, a1 ; RV64ZBA-NEXT: sltu a0, a0, a1 ; RV64ZBA-NEXT: xori a0, a0, 1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2) @@ -2246,6 +2426,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB42_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.select.i32: @@ -2258,6 +2439,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB42_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.select.i32: @@ -2269,6 +2451,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB42_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.select.i32: @@ -2281,6 +2464,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB42_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2) @@ -2297,6 +2481,7 @@ ; RV32-NEXT: srai a0, a0, 31 ; RV32-NEXT: xor a0, a2, a0 ; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.not.i32: @@ -2307,6 +2492,7 @@ ; RV64-NEXT: mulw a0, a0, a1 ; RV64-NEXT: xor a0, a0, a2 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.not.i32: @@ -2316,6 +2502,7 @@ ; RV32ZBA-NEXT: srai a0, a0, 31 ; RV32ZBA-NEXT: xor a0, a2, a0 ; RV32ZBA-NEXT: seqz a0, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.not.i32: @@ -2326,6 +2513,7 @@ ; RV64ZBA-NEXT: mulw a0, a0, a1 ; 
RV64ZBA-NEXT: xor a0, a0, a2 ; RV64ZBA-NEXT: seqz a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2) @@ -2396,7 +2584,11 @@ ; RV32-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: .cfi_restore s1 +; RV32-NEXT: .cfi_restore s2 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.select.i64: @@ -2408,6 +2600,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB44_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.select.i64: @@ -2471,7 +2664,11 @@ ; RV32ZBA-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32ZBA-NEXT: .cfi_restore s0 +; RV32ZBA-NEXT: .cfi_restore s1 +; RV32ZBA-NEXT: .cfi_restore s2 ; RV32ZBA-NEXT: addi sp, sp, 16 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.select.i64: @@ -2483,6 +2680,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB44_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2) @@ -2549,7 +2747,11 @@ ; RV32-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: .cfi_restore s0 +; RV32-NEXT: .cfi_restore s1 +; RV32-NEXT: .cfi_restore s2 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.not.i64: @@ -2559,6 +2761,7 @@ ; RV64-NEXT: srai a0, a0, 63 ; RV64-NEXT: xor a0, a2, a0 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.not.i64: @@ -2618,7 +2821,11 @@ ; RV32ZBA-NEXT: lw s2, 4(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32ZBA-NEXT: .cfi_restore s0 +; RV32ZBA-NEXT: .cfi_restore s1 +; RV32ZBA-NEXT: .cfi_restore s2 ; RV32ZBA-NEXT: addi sp, sp, 16 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.not.i64: @@ -2628,6 +2835,7 @@ ; RV64ZBA-NEXT: srai a0, a0, 63 ; RV64ZBA-NEXT: xor a0, a2, a0 ; RV64ZBA-NEXT: seqz a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2) @@ -2644,6 +2852,7 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB46_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.select.i32: @@ -2656,6 +2865,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB46_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.select.i32: @@ -2665,6 +2875,7 @@ ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 ; RV32ZBA-NEXT: .LBB46_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.select.i32: @@ -2677,6 +2888,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB46_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2) @@ -2690,6 +2902,7 @@ ; RV32: # %bb.0: # %entry ; RV32-NEXT: 
mulhu a0, a0, a1 ; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.not.i32: @@ -2699,12 +2912,14 @@ ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.not.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulhu a0, a0, a1 ; RV32ZBA-NEXT: seqz a0, a0 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.not.i32: @@ -2714,6 +2929,7 @@ ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srli a0, a0, 32 ; RV64ZBA-NEXT: seqz a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2) @@ -2746,6 +2962,7 @@ ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB48_2: # %entry +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.select.i64: @@ -2755,6 +2972,7 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB48_2: # %entry +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.select.i64: @@ -2780,6 +2998,7 @@ ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: .LBB48_2: # %entry +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.select.i64: @@ -2789,6 +3008,7 @@ ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 ; RV64ZBA-NEXT: .LBB48_2: # %entry +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2) @@ -2817,12 +3037,14 @@ ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: or a0, a0, a6 ; RV32-NEXT: xori a0, a0, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.not.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.not.i64: @@ -2844,12 +3066,14 @@ ; RV32ZBA-NEXT: or a0, a1, a0 ; RV32ZBA-NEXT: or a0, a0, a6 ; RV32ZBA-NEXT: xori a0, a0, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.not.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulhu a0, a0, a1 ; RV64ZBA-NEXT: seqz a0, a0 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2) @@ -2871,9 +3095,11 @@ ; RV32-NEXT: beq a1, a0, .LBB50_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB50_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.br.i32: @@ -2885,9 +3111,11 @@ ; RV64-NEXT: beq a0, a2, .LBB50_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB50_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.br.i32: @@ -2898,9 +3126,11 @@ ; RV32ZBA-NEXT: beq a1, a0, .LBB50_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB50_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.br.i32: @@ -2912,9 +3142,11 @@ ; RV64ZBA-NEXT: beq a0, a2, .LBB50_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret ; 
RV64ZBA-NEXT: .LBB50_2: # %continue ; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -2943,9 +3175,11 @@ ; RV32-NEXT: bgez a0, .LBB51_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB51_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.br.i64: @@ -2956,9 +3190,11 @@ ; RV64-NEXT: beq a1, a0, .LBB51_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB51_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.br.i64: @@ -2974,9 +3210,11 @@ ; RV32ZBA-NEXT: bgez a0, .LBB51_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB51_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.br.i64: @@ -2987,9 +3225,11 @@ ; RV64ZBA-NEXT: beq a1, a0, .LBB51_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB51_2: # %continue ; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -3011,9 +3251,11 @@ ; RV32-NEXT: bgeu a1, a0, .LBB52_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB52_2: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.br.i32: @@ -3023,9 +3265,11 @@ ; RV64-NEXT: bgeu a1, a0, .LBB52_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB52_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.br.i32: @@ -3034,9 +3278,11 @@ ; RV32ZBA-NEXT: bgeu a1, a0, .LBB52_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: mv a0, zero +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; RV32ZBA-NEXT: .LBB52_2: # %continue ; RV32ZBA-NEXT: addi a0, zero, 1 +; RV32ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.br.i32: @@ -3046,9 +3292,11 @@ ; RV64ZBA-NEXT: bgeu a1, a0, .LBB52_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: mv a0, zero +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret ; RV64ZBA-NEXT: .LBB52_2: # %continue ; RV64ZBA-NEXT: addi a0, zero, 1 +; RV64ZBA-NEXT: .cfi_def_cfa_offset 0 ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -3077,9 +3325,11 @@ ; RV32-NEXT: beqz a0, .LBB53_4 ; RV32-NEXT: # %bb.3: # %overflow ; RV32-NEXT: mv a0, zero +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; RV32-NEXT: .LBB53_4: # %continue ; RV32-NEXT: addi a0, zero, 1 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.br.i64: @@ -3088,9 +3338,11 @@ ; RV64-NEXT: bgeu a1, a0, .LBB53_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: mv a0, zero +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; RV64-NEXT: .LBB53_2: # %continue ; RV64-NEXT: addi a0, zero, 1 +; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; 
RV32ZBA-LABEL: uaddo.br.i64:
@@ -3106,9 +3358,11 @@
 ; RV32ZBA-NEXT: beqz a0, .LBB53_4
 ; RV32ZBA-NEXT: # %bb.3: # %overflow
 ; RV32ZBA-NEXT: mv a0, zero
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ; RV32ZBA-NEXT: .LBB53_4: # %continue
 ; RV32ZBA-NEXT: addi a0, zero, 1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: uaddo.br.i64:
@@ -3117,9 +3371,11 @@
 ; RV64ZBA-NEXT: bgeu a1, a0, .LBB53_2
 ; RV64ZBA-NEXT: # %bb.1: # %overflow
 ; RV64ZBA-NEXT: mv a0, zero
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 ; RV64ZBA-NEXT: .LBB53_2: # %continue
 ; RV64ZBA-NEXT: addi a0, zero, 1
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 entry:
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
@@ -3143,9 +3399,11 @@
 ; RV32-NEXT: beq a2, a0, .LBB54_2
 ; RV32-NEXT: # %bb.1: # %overflow
 ; RV32-NEXT: mv a0, zero
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ; RV32-NEXT: .LBB54_2: # %continue
 ; RV32-NEXT: addi a0, zero, 1
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: ssubo.br.i32:
@@ -3157,9 +3415,11 @@
 ; RV64-NEXT: beq a0, a2, .LBB54_2
 ; RV64-NEXT: # %bb.1: # %overflow
 ; RV64-NEXT: mv a0, zero
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ; RV64-NEXT: .LBB54_2: # %continue
 ; RV64-NEXT: addi a0, zero, 1
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32ZBA-LABEL: ssubo.br.i32:
@@ -3170,9 +3430,11 @@
 ; RV32ZBA-NEXT: beq a2, a0, .LBB54_2
 ; RV32ZBA-NEXT: # %bb.1: # %overflow
 ; RV32ZBA-NEXT: mv a0, zero
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ; RV32ZBA-NEXT: .LBB54_2: # %continue
 ; RV32ZBA-NEXT: addi a0, zero, 1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: ssubo.br.i32:
@@ -3184,9 +3446,11 @@
 ; RV64ZBA-NEXT: beq a0, a2, .LBB54_2
 ; RV64ZBA-NEXT: # %bb.1: # %overflow
 ; RV64ZBA-NEXT: mv a0, zero
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 ; RV64ZBA-NEXT: .LBB54_2: # %continue
 ; RV64ZBA-NEXT: addi a0, zero, 1
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 entry:
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -3213,9 +3477,11 @@
 ; RV32-NEXT: bgez a0, .LBB55_2
 ; RV32-NEXT: # %bb.1: # %overflow
 ; RV32-NEXT: mv a0, zero
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ; RV32-NEXT: .LBB55_2: # %continue
 ; RV32-NEXT: addi a0, zero, 1
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: ssubo.br.i64:
@@ -3226,9 +3492,11 @@
 ; RV64-NEXT: beq a2, a0, .LBB55_2
 ; RV64-NEXT: # %bb.1: # %overflow
 ; RV64-NEXT: mv a0, zero
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ; RV64-NEXT: .LBB55_2: # %continue
 ; RV64-NEXT: addi a0, zero, 1
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32ZBA-LABEL: ssubo.br.i64:
@@ -3242,9 +3510,11 @@
 ; RV32ZBA-NEXT: bgez a0, .LBB55_2
 ; RV32ZBA-NEXT: # %bb.1: # %overflow
 ; RV32ZBA-NEXT: mv a0, zero
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ; RV32ZBA-NEXT: .LBB55_2: # %continue
 ; RV32ZBA-NEXT: addi a0, zero, 1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: ssubo.br.i64:
@@ -3255,9 +3525,11 @@
 ; RV64ZBA-NEXT: beq a2, a0, .LBB55_2
 ; RV64ZBA-NEXT: # %bb.1: # %overflow
 ; RV64ZBA-NEXT: mv a0, zero
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 ; RV64ZBA-NEXT: .LBB55_2: # %continue
 ; RV64ZBA-NEXT: addi a0, zero, 1
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 entry:
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -3279,9 +3551,11 @@
 ; RV32-NEXT: bgeu a0, a1, .LBB56_2
 ; RV32-NEXT: # %bb.1: # %overflow
 ; RV32-NEXT: mv a0, zero
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ; RV32-NEXT: .LBB56_2: # %continue
 ; RV32-NEXT: addi a0, zero, 1
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: usubo.br.i32:
@@ -3291,9 +3565,11 @@
 ; RV64-NEXT: bgeu a0, a1, .LBB56_2
 ; RV64-NEXT: # %bb.1: # %overflow
 ; RV64-NEXT: mv a0, zero
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ; RV64-NEXT: .LBB56_2: # %continue
 ; RV64-NEXT: addi a0, zero, 1
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32ZBA-LABEL: usubo.br.i32:
@@ -3302,9 +3578,11 @@
 ; RV32ZBA-NEXT: bgeu a0, a1, .LBB56_2
 ; RV32ZBA-NEXT: # %bb.1: # %overflow
 ; RV32ZBA-NEXT: mv a0, zero
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ; RV32ZBA-NEXT: .LBB56_2: # %continue
 ; RV32ZBA-NEXT: addi a0, zero, 1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: usubo.br.i32:
@@ -3314,9 +3592,11 @@
 ; RV64ZBA-NEXT: bgeu a0, a1, .LBB56_2
 ; RV64ZBA-NEXT: # %bb.1: # %overflow
 ; RV64ZBA-NEXT: mv a0, zero
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 ; RV64ZBA-NEXT: .LBB56_2: # %continue
 ; RV64ZBA-NEXT: addi a0, zero, 1
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 entry:
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -3343,6 +3623,7 @@
 ; RV32-NEXT: bnez a0, .LBB57_4
 ; RV32-NEXT: .LBB57_2: # %continue
 ; RV32-NEXT: addi a0, zero, 1
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ; RV32-NEXT: .LBB57_3:
 ; RV32-NEXT: sub a1, a0, a2
@@ -3350,6 +3631,7 @@
 ; RV32-NEXT: beqz a0, .LBB57_2
 ; RV32-NEXT: .LBB57_4: # %overflow
 ; RV32-NEXT: mv a0, zero
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: usubo.br.i64:
@@ -3358,9 +3640,11 @@
 ; RV64-NEXT: bgeu a0, a1, .LBB57_2
 ; RV64-NEXT: # %bb.1: # %overflow
 ; RV64-NEXT: mv a0, zero
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ; RV64-NEXT: .LBB57_2: # %continue
 ; RV64-NEXT: addi a0, zero, 1
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32ZBA-LABEL: usubo.br.i64:
@@ -3374,6 +3658,7 @@
 ; RV32ZBA-NEXT: bnez a0, .LBB57_4
 ; RV32ZBA-NEXT: .LBB57_2: # %continue
 ; RV32ZBA-NEXT: addi a0, zero, 1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ; RV32ZBA-NEXT: .LBB57_3:
 ; RV32ZBA-NEXT: sub a1, a0, a2
@@ -3381,6 +3666,7 @@
 ; RV32ZBA-NEXT: beqz a0, .LBB57_2
 ; RV32ZBA-NEXT: .LBB57_4: # %overflow
 ; RV32ZBA-NEXT: mv a0, zero
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: usubo.br.i64:
@@ -3389,9 +3675,11 @@
 ; RV64ZBA-NEXT: bgeu a0, a1, .LBB57_2
 ; RV64ZBA-NEXT: # %bb.1: # %overflow
 ; RV64ZBA-NEXT: mv a0, zero
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 ; RV64ZBA-NEXT: .LBB57_2: # %continue
 ; RV64ZBA-NEXT: addi a0, zero, 1
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 entry:
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -3415,9 +3703,11 @@
 ; RV32-NEXT: beq a2, a0, .LBB58_2
 ; RV32-NEXT: # %bb.1: # %overflow
 ; RV32-NEXT: mv a0, zero
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ; RV32-NEXT: .LBB58_2: # %continue
 ; RV32-NEXT: addi a0, zero, 1
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: smulo.br.i32:
@@ -3429,9 +3719,11 @@
 ; RV64-NEXT: beq a0, a2, .LBB58_2
 ; RV64-NEXT: # %bb.1: # %overflow
 ; RV64-NEXT: mv a0, zero
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ; RV64-NEXT: .LBB58_2: # %continue
 ; RV64-NEXT: addi a0, zero, 1
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32ZBA-LABEL: smulo.br.i32:
@@ -3442,9 +3734,11 @@
 ; RV32ZBA-NEXT: beq a2, a0, .LBB58_2
 ; RV32ZBA-NEXT: # %bb.1: # %overflow
 ; RV32ZBA-NEXT: mv a0, zero
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ; RV32ZBA-NEXT: .LBB58_2: # %continue
 ; RV32ZBA-NEXT: addi a0, zero, 1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: smulo.br.i32:
@@ -3456,9 +3750,11 @@
 ; RV64ZBA-NEXT: beq a0, a2, .LBB58_2
 ; RV64ZBA-NEXT: # %bb.1: # %overflow
 ; RV64ZBA-NEXT: mv a0, zero
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 ; RV64ZBA-NEXT: .LBB58_2: # %continue
 ; RV64ZBA-NEXT: addi a0, zero, 1
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 entry:
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
@@ -3537,7 +3833,11 @@
 ; RV32-NEXT: lw s2, 4(sp) # 4-byte Folded Reload
 ; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore s0
+; RV32-NEXT: .cfi_restore s1
+; RV32-NEXT: .cfi_restore s2
 ; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: smulo.br.i64:
@@ -3548,9 +3848,11 @@
 ; RV64-NEXT: beq a2, a0, .LBB59_2
 ; RV64-NEXT: # %bb.1: # %overflow
 ; RV64-NEXT: mv a0, zero
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ; RV64-NEXT: .LBB59_2: # %continue
 ; RV64-NEXT: addi a0, zero, 1
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32ZBA-LABEL: smulo.br.i64:
@@ -3616,7 +3918,11 @@
 ; RV32ZBA-NEXT: lw s2, 4(sp) # 4-byte Folded Reload
 ; RV32ZBA-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
 ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZBA-NEXT: .cfi_restore s0
+; RV32ZBA-NEXT: .cfi_restore s1
+; RV32ZBA-NEXT: .cfi_restore s2
 ; RV32ZBA-NEXT: addi sp, sp, 16
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: smulo.br.i64:
@@ -3627,9 +3933,11 @@
 ; RV64ZBA-NEXT: beq a2, a0, .LBB59_2
 ; RV64ZBA-NEXT: # %bb.1: # %overflow
 ; RV64ZBA-NEXT: mv a0, zero
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 ; RV64ZBA-NEXT: .LBB59_2: # %continue
 ; RV64ZBA-NEXT: addi a0, zero, 1
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 entry:
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
@@ -3688,9 +3996,11 @@
 ; RV32-NEXT: beqz a0, .LBB60_2
 ; RV32-NEXT: # %bb.1: # %overflow
 ; RV32-NEXT: mv a0, zero
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ; RV32-NEXT: .LBB60_2: # %continue
 ; RV32-NEXT: addi a0, zero, 1
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: smulo2.br.i64:
@@ -3702,9 +4012,11 @@
 ; RV64-NEXT: beq a2, a0, .LBB60_2
 ; RV64-NEXT: # %bb.1: # %overflow
 ; RV64-NEXT: mv a0, zero
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ; RV64-NEXT: .LBB60_2: # %continue
 ; RV64-NEXT: addi a0, zero, 1
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32ZBA-LABEL: smulo2.br.i64:
@@ -3750,9 +4062,11 @@
 ; RV32ZBA-NEXT: beqz a0, .LBB60_2
 ; RV32ZBA-NEXT: # %bb.1: # %overflow
 ; RV32ZBA-NEXT: mv a0, zero
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ; RV32ZBA-NEXT: .LBB60_2: # %continue
 ; RV32ZBA-NEXT: addi a0, zero, 1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: smulo2.br.i64:
@@ -3764,9 +4078,11 @@
 ; RV64ZBA-NEXT: beq a2, a0, .LBB60_2
 ; RV64ZBA-NEXT: # %bb.1: # %overflow
 ; RV64ZBA-NEXT: mv a0, zero
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 ; RV64ZBA-NEXT: .LBB60_2: # %continue
 ; RV64ZBA-NEXT: addi a0, zero, 1
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 entry:
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 -13)
@@ -3788,9 +4104,11 @@
 ; RV32-NEXT: beqz a0, .LBB61_2
 ; RV32-NEXT: # %bb.1: # %overflow
 ; RV32-NEXT: mv a0, zero
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ; RV32-NEXT: .LBB61_2: # %continue
 ; RV32-NEXT: addi a0, zero, 1
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: umulo.br.i32:
@@ -3802,9 +4120,11 @@
 ; RV64-NEXT: beqz a0, .LBB61_2
 ; RV64-NEXT: # %bb.1: # %overflow
 ; RV64-NEXT: mv a0, zero
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ; RV64-NEXT: .LBB61_2: # %continue
 ; RV64-NEXT: addi a0, zero, 1
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32ZBA-LABEL: umulo.br.i32:
@@ -3813,9 +4133,11 @@
 ; RV32ZBA-NEXT: beqz a0, .LBB61_2
 ; RV32ZBA-NEXT: # %bb.1: # %overflow
 ; RV32ZBA-NEXT: mv a0, zero
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ; RV32ZBA-NEXT: .LBB61_2: # %continue
 ; RV32ZBA-NEXT: addi a0, zero, 1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: umulo.br.i32:
@@ -3827,9 +4149,11 @@
 ; RV64ZBA-NEXT: beqz a0, .LBB61_2
 ; RV64ZBA-NEXT: # %bb.1: # %overflow
 ; RV64ZBA-NEXT: mv a0, zero
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 ; RV64ZBA-NEXT: .LBB61_2: # %continue
 ; RV64ZBA-NEXT: addi a0, zero, 1
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 entry:
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
@@ -3866,9 +4190,11 @@
 ; RV32-NEXT: beqz a0, .LBB62_2
 ; RV32-NEXT: # %bb.1: # %overflow
 ; RV32-NEXT: mv a0, zero
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ; RV32-NEXT: .LBB62_2: # %continue
 ; RV32-NEXT: addi a0, zero, 1
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: umulo.br.i64:
@@ -3877,9 +4203,11 @@
 ; RV64-NEXT: beqz a0, .LBB62_2
 ; RV64-NEXT: # %bb.1: # %overflow
 ; RV64-NEXT: mv a0, zero
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ; RV64-NEXT: .LBB62_2: # %continue
 ; RV64-NEXT: addi a0, zero, 1
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32ZBA-LABEL: umulo.br.i64:
@@ -3903,9 +4231,11 @@
 ; RV32ZBA-NEXT: beqz a0, .LBB62_2
 ; RV32ZBA-NEXT: # %bb.1: # %overflow
 ; RV32ZBA-NEXT: mv a0, zero
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ; RV32ZBA-NEXT: .LBB62_2: # %continue
 ; RV32ZBA-NEXT: addi a0, zero, 1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: umulo.br.i64:
@@ -3914,9 +4244,11 @@
 ; RV64ZBA-NEXT: beqz a0, .LBB62_2
 ; RV64ZBA-NEXT: # %bb.1: # %overflow
 ; RV64ZBA-NEXT: mv a0, zero
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 ; RV64ZBA-NEXT: .LBB62_2: # %continue
 ; RV64ZBA-NEXT: addi a0, zero, 1
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 entry:
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
@@ -3945,9 +4277,11 @@
 ; RV32-NEXT: beqz a0, .LBB63_4
 ; RV32-NEXT: # %bb.3: # %overflow
 ; RV32-NEXT: mv a0, zero
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ; RV32-NEXT: .LBB63_4: # %continue
 ; RV32-NEXT: addi a0, zero, 1
+; RV32-NEXT: .cfi_def_cfa_offset 0
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: umulo2.br.i64:
@@ -3956,9 +4290,11 @@
 ; RV64-NEXT: bgeu a1, a0, .LBB63_2
 ; RV64-NEXT: # %bb.1: # %overflow
 ; RV64-NEXT: mv a0, zero
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ; RV64-NEXT: .LBB63_2: # %continue
 ; RV64-NEXT: addi a0, zero, 1
+; RV64-NEXT: .cfi_def_cfa_offset 0
 ; RV64-NEXT: ret
 ;
 ; RV32ZBA-LABEL: umulo2.br.i64:
@@ -3974,9 +4310,11 @@
 ; RV32ZBA-NEXT: beqz a0, .LBB63_4
 ; RV32ZBA-NEXT: # %bb.3: # %overflow
 ; RV32ZBA-NEXT: mv a0, zero
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ; RV32ZBA-NEXT: .LBB63_4: # %continue
 ; RV32ZBA-NEXT: addi a0, zero, 1
+; RV32ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: umulo2.br.i64:
@@ -3985,9 +4323,11 @@
 ; RV64ZBA-NEXT: bgeu a1, a0, .LBB63_2
 ; RV64ZBA-NEXT: # %bb.1: # %overflow
 ; RV64ZBA-NEXT: mv a0, zero
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 ; RV64ZBA-NEXT: .LBB63_2: # %continue
 ; RV64ZBA-NEXT: addi a0, zero, 1
+; RV64ZBA-NEXT: .cfi_def_cfa_offset 0
 ; RV64ZBA-NEXT: ret
 entry:
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 2)
diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected
@@ -7,6 +7,7 @@
 ; CHECK-LABEL: _Z54bar$ompvariant$bar:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: addi a0, zero, 2
+; CHECK-NEXT: .cfi_def_cfa_offset 0
 ; CHECK-NEXT: ret
 entry:
   ret i32 2